// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

bool kexec_file_dbg_print;

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
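
/*
 * To make the descriptor-list format above concrete, here is a minimal,
 * illustrative sketch (not kernel code) of how such a list is consumed.
 * It assumes the IND_* flag values from <linux/kexec.h> and two
 * pseudo-helpers, map() and copy_one_page(), standing in for whatever
 * the architecture's assembly stub actually does:
 *
 *	#define IND_DESTINATION	0x1	// start copying to this address
 *	#define IND_INDIRECTION	0x2	// list continues in this page
 *	#define IND_DONE		0x4	// end of the list
 *	#define IND_SOURCE		0x8	// copy one page from this address
 *
 *	void consume(unsigned long *ptr)
 *	{
 *		unsigned long entry, dest = 0;
 *
 *		while (!((entry = *ptr) & IND_DONE)) {
 *			if (entry & IND_DESTINATION) {
 *				dest = entry & PAGE_MASK;
 *				ptr++;
 *			} else if (entry & IND_INDIRECTION) {
 *				ptr = map(entry & PAGE_MASK);
 *			} else {	// IND_SOURCE
 *				copy_one_page(dest, entry & PAGE_MASK);
 *				dest += PAGE_SIZE;
 *				ptr++;
 *			}
 *		}
 *	}
 *
 * This mirrors the walk performed by for_each_kimage_entry() later in
 * this file.
 */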

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}
#endif

	return 0;
}
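
/*
 * The overlap test used above is the standard interval check: with
 * end-exclusive ranges [as, ae) and [bs, be), the two intersect exactly
 * when each starts before the other ends.  A self-contained sketch:
 *
 *	static bool ranges_overlap(unsigned long as, unsigned long ae,
 *				   unsigned long bs, unsigned long be)
 *	{
 *		return (ae > bs) && (as < be);
 *	}
 *
 * For example, [0x1000, 0x3000) and [0x2000, 0x4000) overlap, while
 * [0x1000, 0x2000) and [0x2000, 0x3000) merely touch and do not.
 */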

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

#ifdef CONFIG_CRASH_HOTPLUG
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_index = -1;
	image->elfcorehdr_updated = false;
#endif

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		if ((end >= mstart) && (start <= mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = (epfn << PAGE_SHIFT) - 1;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

#ifdef CONFIG_CRASH_DUMP
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = ALIGN(image->control_page, size);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = ALIGN(mend, size);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end + 1;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}
#endif
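
/*
 * A worked example of the first-fit scan above, assuming 4K pages and
 * order = 1 (so size = 0x2000 and holes start on 0x2000-aligned
 * addresses): if image->control_page is 0x100000 and a segment occupies
 * [0x100000, 0x104fff], the first candidate hole [0x100000, 0x101fff]
 * overlaps the segment, hole_start advances to ALIGN(0x104fff, 0x2000)
 * = 0x106000, and the hole [0x106000, 0x107fff] is taken, provided it
 * still lies below crashk_res.end and the control memory limit.
 */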

struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
#endif
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	destination &= PAGE_MASK;

	return kimage_add_entry(image, destination | IND_DESTINATION);
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	page &= PAGE_MASK;

	return kimage_add_entry(image, page | IND_SOURCE);
}
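
/*
 * The entry list built by the helpers above grows transparently.
 * image->entry and image->last_entry initially both point at
 * image->head, so the first kimage_add_entry() allocates a page of
 * entries and stores its physical address, tagged IND_INDIRECTION, in
 * image->head.  When that page fills up, its final slot is turned into
 * another IND_INDIRECTION link, giving a chain of the shape:
 *
 *	image->head: [IND|page0]
 *	page0:       [DEST|d][SRC|s0][SRC|s1] ... [IND|page1]
 *	page1:       [SRC|sN] ... [DONE]
 *
 * for_each_kimage_entry() below follows these links, so the chain reads
 * as one logical array of entries.
 */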

static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

#ifdef CONFIG_CRASH_DUMP
	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}
#endif

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might happen
	 * if an error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE - 1))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}
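
/*
 * A concrete walk-through of the swap above: suppose the caller asks
 * for destination 0x3000 and kimage_alloc_pages() happens to return
 * the page at 0x5000, which is already some source page's recorded
 * destination.  kimage_dst_used() finds that IND_SOURCE entry; the
 * displaced source's contents are copied into the new 0x5000 page,
 * the entry is repointed at 0x5000 (making it its own destination,
 * which the invariant permits), and the displaced page becomes the
 * candidate for 0x3000, unless it is a highmem page that gfp_mask
 * cannot honor, in which case it is freed and the loop retries.
 */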

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap_local_page(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kunmap_local(ptr);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr  += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
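
/*
 * The mchunk/uchunk arithmetic above splits an arbitrary (mem, memsz)
 * range into page-bounded copies.  For example, with 4K pages,
 * maddr = 0x10f80 and mbytes = 0x2000:
 *
 *	pass 1: mchunk = min(0x2000, 0x1000 - 0xf80) = 0x80
 *	pass 2: mchunk = min(0x1f80, 0x1000)         = 0x1000
 *	pass 3: mchunk = min(0xf80,  0x1000)         = 0xf80
 *
 * uchunk further caps each copy by the bytes remaining in the source
 * buffer (bufsz), so any memsz beyond bufsz is left as the zero fill
 * from clear_page().
 */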

#ifdef CONFIG_CRASH_DUMP
static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap_local_page(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kexec_flush_icache_page(page);
		kunmap_local(ptr);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr  += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
#endif
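
/*
 * Note the contrast with the normal path: there each page is freshly
 * allocated and cleared before the copy, while here the pages already
 * exist inside the reserved crash region, so any tail of a page beyond
 * the supplied buffer (mchunk > uchunk) is zeroed explicitly to avoid
 * carrying stale contents into the loaded image.
 */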

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
#endif
	}

	return result;
}

struct kexec_load_limit {
	/* Mutex protects the limit count. */
	struct mutex mutex;
	int limit;
};

static struct kexec_load_limit load_limit_reboot = {
	.mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
	.limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
	.mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
	.limit = -1,
};

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
static int kexec_limit_handler(const struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct kexec_load_limit *limit = table->data;
	int val;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
	};
	int ret;

	if (write) {
		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
		if (ret)
			return ret;

		if (val < 0)
			return -EINVAL;

		mutex_lock(&limit->mutex);
		if (limit->limit != -1 && val >= limit->limit)
			ret = -EINVAL;
		else
			limit->limit = val;
		mutex_unlock(&limit->mutex);

		return ret;
	}

	mutex_lock(&limit->mutex);
	val = limit->limit;
	mutex_unlock(&limit->mutex);

	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}

static struct ctl_table kexec_core_sysctls[] = {
	{
		.procname	= "kexec_load_disabled",
		.data		= &kexec_load_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "kexec_load_limit_panic",
		.data		= &load_limit_panic,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
	{
		.procname	= "kexec_load_limit_reboot",
		.data		= &load_limit_reboot,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
};

static int __init kexec_core_sysctl_init(void)
{
	register_sysctl_init("kernel", kexec_core_sysctls);
	return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif
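
/*
 * A hypothetical usage sketch for the knobs registered above, assuming
 * the usual /proc/sys mount:
 *
 *	# permit exactly one more crash-kernel load, then refuse
 *	echo 1 > /proc/sys/kernel/kexec_load_limit_panic
 *
 *	# limits may only ever decrease; raising one back fails (-EINVAL)
 *	echo 5 > /proc/sys/kernel/kexec_load_limit_panic
 *
 * kexec_load_limit_reboot behaves the same way for normal loads, and
 * kexec_load_disabled only accepts the one-way transition from 0 to 1.
 */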
<< 960
<< 961 bool kexec_load_permitted(int kexec_image_type)
<< 962 {
<< 963 	struct kexec_load_limit *limit;
<< 964
<< 965 	/*
<< 966 	 * Only the superuser can use the kexec syscall, and only if it
<< 967 	 * has not been disabled.
<< 968 	 */
<< 969 	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
<< 970 		return false;
<< 971
<< 972 	/* Check the limit counter and decrease it. */
<< 973 	limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
<< 974 		&load_limit_panic : &load_limit_reboot;
<< 975 	mutex_lock(&limit->mutex);
<< 976 	if (!limit->limit) {
<< 977 		mutex_unlock(&limit->mutex);
<< 978 		return false;
<< 979 	}
<< 980 	if (limit->limit != -1)
<< 981 		limit->limit--;
<< 982 	mutex_unlock(&limit->mutex);
<< 983
<< 984 	return true;
<< 985 }
>> 1200
>> 1201 static __init char *get_last_crashkernel(char *cmdline,
>> 1202 					     const char *name,
>> 1203 					     const char *suffix)
>> 1204 {
>> 1205 	char *p = cmdline, *ck_cmdline = NULL;
>> 1206
>> 1207 	/* find crashkernel and use the last one if there are more */
>> 1208 	p = strstr(p, name);
>> 1209 	while (p) {
>> 1210 		char *end_p = strchr(p, ' ');
>> 1211 		char *q;
>> 1212
>> 1213 		if (!end_p)
>> 1214 			end_p = p + strlen(p);
>> 1215
>> 1216 		if (!suffix) {
>> 1217 			int i;
>> 1218
>> 1219 			/* skip the one with any known suffix */
>> 1220 			for (i = 0; suffix_tbl[i]; i++) {
>> 1221 				q = end_p - strlen(suffix_tbl[i]);
>> 1222 				if (!strncmp(q, suffix_tbl[i],
>> 1223 					     strlen(suffix_tbl[i])))
>> 1224 					goto next;
>> 1225 			}
>> 1226 			ck_cmdline = p;
>> 1227 		} else {
>> 1228 			q = end_p - strlen(suffix);
>> 1229 			if (!strncmp(q, suffix, strlen(suffix)))
>> 1230 				ck_cmdline = p;
>> 1231 		}
>> 1232 next:
>> 1233 		p = strstr(p+1, name);
>> 1234 	}
>> 1235
>> 1236 	if (!ck_cmdline)
>> 1237 		return NULL;
>> 1238
>> 1239 	return ck_cmdline;
>> 1240 }
>> 1241
>> 1242 static int __init __parse_crashkernel(char *cmdline,
>> 1243 			     unsigned long long system_ram,
>> 1244 			     unsigned long long *crash_size,
>> 1245 			     unsigned long long *crash_base,
>> 1246 			     const char *name,
>> 1247 			     const char *suffix)
>> 1248 {
>> 1249 	char *first_colon, *first_space;
>> 1250 	char *ck_cmdline;
>> 1251
>> 1252 	BUG_ON(!crash_size || !crash_base);
>> 1253 	*crash_size = 0;
>> 1254 	*crash_base = 0;
>> 1255
>> 1256 	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
>> 1257
>> 1258 	if (!ck_cmdline)
>> 1259 		return -EINVAL;
>> 1260
>> 1261 	ck_cmdline += strlen(name);
>> 1262
>> 1263 	if (suffix)
>> 1264 		return parse_crashkernel_suffix(ck_cmdline, crash_size,
>> 1265 				suffix);
>> 1266 	/*
>> 1267 	 * if the command line contains a ':', then that's the extended
>> 1268 	 * syntax -- if not, it must be the classic syntax
>> 1269 	 */
>> 1270 	first_colon = strchr(ck_cmdline, ':');
>> 1271 	first_space = strchr(ck_cmdline, ' ');
>> 1272 	if (first_colon && (!first_space || first_colon < first_space))
>> 1273 		return parse_crashkernel_mem(ck_cmdline, system_ram,
>> 1274 				crash_size, crash_base);
>> 1275
>> 1276 	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
>> 1277 }
>> 1278
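The dispatch rule above is worth spelling out: if a crashkernel= option is repeated, the last one wins, and a ':' that appears before any ' ' selects the extended range syntax, while anything else falls through to the classic size[@offset] parser. A hypothetical stand-alone illustration of that second decision (not kernel code; names and samples invented):

#include <stdio.h>
#include <string.h>

/* Mirrors the first_colon/first_space test in __parse_crashkernel(). */
static const char *which_syntax(const char *ck)
{
	const char *colon = strchr(ck, ':');
	const char *space = strchr(ck, ' ');

	if (colon && (!space || colon < space))
		return "extended (ramsize-range:size)";
	return "simple (size[@offset])";
}

int main(void)
{
	printf("%s\n", which_syntax("512M-2G:64M,2G-:128M"));	/* extended */
	printf("%s\n", which_syntax("128M@16M"));		/* simple */
	return 0;
}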
>> 1282 */ >> 1283 int __init parse_crashkernel(char *cmdline, >> 1284 unsigned long long system_ram, >> 1285 unsigned long long *crash_size, >> 1286 unsigned long long *crash_base) >> 1287 { >> 1288 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, >> 1289 "crashkernel=", NULL); >> 1290 } >> 1291 >> 1292 int __init parse_crashkernel_high(char *cmdline, >> 1293 unsigned long long system_ram, >> 1294 unsigned long long *crash_size, >> 1295 unsigned long long *crash_base) >> 1296 { >> 1297 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, >> 1298 "crashkernel=", suffix_tbl[SUFFIX_HIGH]); >> 1299 } >> 1300 >> 1301 int __init parse_crashkernel_low(char *cmdline, >> 1302 unsigned long long system_ram, >> 1303 unsigned long long *crash_size, >> 1304 unsigned long long *crash_base) >> 1305 { >> 1306 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, >> 1307 "crashkernel=", suffix_tbl[SUFFIX_LOW]); >> 1308 } >> 1309 >> 1310 static void update_vmcoreinfo_note(void) >> 1311 { >> 1312 u32 *buf = vmcoreinfo_note; >> 1313 >> 1314 if (!vmcoreinfo_size) >> 1315 return; >> 1316 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, >> 1317 vmcoreinfo_size); >> 1318 final_note(buf); >> 1319 } >> 1320 >> 1321 void crash_save_vmcoreinfo(void) >> 1322 { >> 1323 vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); >> 1324 update_vmcoreinfo_note(); >> 1325 } >> 1326 >> 1327 void vmcoreinfo_append_str(const char *fmt, ...) >> 1328 { >> 1329 va_list args; >> 1330 char buf[0x50]; >> 1331 size_t r; >> 1332 >> 1333 va_start(args, fmt); >> 1334 r = vscnprintf(buf, sizeof(buf), fmt, args); >> 1335 va_end(args); >> 1336 >> 1337 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); >> 1338 >> 1339 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); >> 1340 >> 1341 vmcoreinfo_size += r; >> 1342 } >> 1343 >> 1344 /* >> 1345 * provide an empty default implementation here -- architecture >> 1346 * code may override this >> 1347 */ >> 1348 void __weak arch_crash_save_vmcoreinfo(void) >> 1349 {} >> 1350 >> 1351 unsigned long __weak paddr_vmcoreinfo_note(void) >> 1352 { >> 1353 return __pa((unsigned long)(char *)&vmcoreinfo_note); >> 1354 } >> 1355 >> 1356 static int __init crash_save_vmcoreinfo_init(void) >> 1357 { >> 1358 VMCOREINFO_OSRELEASE(init_uts_ns.name.release); >> 1359 VMCOREINFO_PAGESIZE(PAGE_SIZE); 971 1360 972 /* Check limit counter and decrease it !! 1361 VMCOREINFO_SYMBOL(init_uts_ns); 973 limit = (kexec_image_type == KEXEC_TYP !! 1362 VMCOREINFO_SYMBOL(node_online_map); 974 &load_limit_panic : &load_limi !! 1363 #ifdef CONFIG_MMU 975 mutex_lock(&limit->mutex); !! 1364 VMCOREINFO_SYMBOL(swapper_pg_dir); 976 if (!limit->limit) { !! 1365 #endif 977 mutex_unlock(&limit->mutex); !! 1366 VMCOREINFO_SYMBOL(_stext); 978 return false; !! 1367 VMCOREINFO_SYMBOL(vmap_area_list); 979 } !! 1368 980 if (limit->limit != -1) !! 1369 #ifndef CONFIG_NEED_MULTIPLE_NODES 981 limit->limit--; !! 1370 VMCOREINFO_SYMBOL(mem_map); 982 mutex_unlock(&limit->mutex); !! 
>> 1356 static int __init crash_save_vmcoreinfo_init(void)
>> 1357 {
>> 1358 	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
>> 1359 	VMCOREINFO_PAGESIZE(PAGE_SIZE);
>> 1360
>> 1361 	VMCOREINFO_SYMBOL(init_uts_ns);
>> 1362 	VMCOREINFO_SYMBOL(node_online_map);
>> 1363 #ifdef CONFIG_MMU
>> 1364 	VMCOREINFO_SYMBOL(swapper_pg_dir);
>> 1365 #endif
>> 1366 	VMCOREINFO_SYMBOL(_stext);
>> 1367 	VMCOREINFO_SYMBOL(vmap_area_list);
>> 1368
>> 1369 #ifndef CONFIG_NEED_MULTIPLE_NODES
>> 1370 	VMCOREINFO_SYMBOL(mem_map);
>> 1371 	VMCOREINFO_SYMBOL(contig_page_data);
>> 1372 #endif
>> 1373 #ifdef CONFIG_SPARSEMEM
>> 1374 	VMCOREINFO_SYMBOL(mem_section);
>> 1375 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
>> 1376 	VMCOREINFO_STRUCT_SIZE(mem_section);
>> 1377 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
>> 1378 #endif
>> 1379 	VMCOREINFO_STRUCT_SIZE(page);
>> 1380 	VMCOREINFO_STRUCT_SIZE(pglist_data);
>> 1381 	VMCOREINFO_STRUCT_SIZE(zone);
>> 1382 	VMCOREINFO_STRUCT_SIZE(free_area);
>> 1383 	VMCOREINFO_STRUCT_SIZE(list_head);
>> 1384 	VMCOREINFO_SIZE(nodemask_t);
>> 1385 	VMCOREINFO_OFFSET(page, flags);
>> 1386 	VMCOREINFO_OFFSET(page, _count);
>> 1387 	VMCOREINFO_OFFSET(page, mapping);
>> 1388 	VMCOREINFO_OFFSET(page, lru);
>> 1389 	VMCOREINFO_OFFSET(page, _mapcount);
>> 1390 	VMCOREINFO_OFFSET(page, private);
>> 1391 	VMCOREINFO_OFFSET(pglist_data, node_zones);
>> 1392 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
>> 1393 #ifdef CONFIG_FLAT_NODE_MEM_MAP
>> 1394 	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
>> 1395 #endif
>> 1396 	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
>> 1397 	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
>> 1398 	VMCOREINFO_OFFSET(pglist_data, node_id);
>> 1399 	VMCOREINFO_OFFSET(zone, free_area);
>> 1400 	VMCOREINFO_OFFSET(zone, vm_stat);
>> 1401 	VMCOREINFO_OFFSET(zone, spanned_pages);
>> 1402 	VMCOREINFO_OFFSET(free_area, free_list);
>> 1403 	VMCOREINFO_OFFSET(list_head, next);
>> 1404 	VMCOREINFO_OFFSET(list_head, prev);
>> 1405 	VMCOREINFO_OFFSET(vmap_area, va_start);
>> 1406 	VMCOREINFO_OFFSET(vmap_area, list);
>> 1407 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
>> 1408 	log_buf_kexec_setup();
>> 1409 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
>> 1410 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
>> 1411 	VMCOREINFO_NUMBER(PG_lru);
>> 1412 	VMCOREINFO_NUMBER(PG_private);
>> 1413 	VMCOREINFO_NUMBER(PG_swapcache);
>> 1414 	VMCOREINFO_NUMBER(PG_slab);
>> 1415 #ifdef CONFIG_MEMORY_FAILURE
>> 1416 	VMCOREINFO_NUMBER(PG_hwpoison);
>> 1417 #endif
>> 1418 	VMCOREINFO_NUMBER(PG_head_mask);
>> 1419 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
>> 1420 #ifdef CONFIG_X86
>> 1421 	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
>> 1422 #endif
>> 1423 #ifdef CONFIG_HUGETLBFS
>> 1424 	VMCOREINFO_SYMBOL(free_huge_page);
>> 1425 #endif
>> 1426
>> 1427 	arch_crash_save_vmcoreinfo();
>> 1428 	update_vmcoreinfo_note();
>> 1429
>> 1430 	return 0;
>> 1431 }
>> 1432
>> 1433 subsys_initcall(crash_save_vmcoreinfo_init);
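On the consuming side, debuggers and tools such as makedumpfile locate this note in the dump and read it as plain "KEY=value\n" text. A minimal parser sketch of that format; the note content below is fabricated for the demo, not read from a real dump.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Fabricated sample of VMCOREINFO note text. */
	const char note[] =
		"OSRELEASE=6.1.0-demo\n"
		"PAGESIZE=4096\n"
		"SYMBOL(_stext)=ffffffff81000000\n";
	char key[64], val[64];
	const char *p = note;

	while (sscanf(p, "%63[^=]=%63[^\n]", key, val) == 2) {
		printf("%-20s -> %s\n", key, val);
		p = strchr(p, '\n');
		if (!p)
			break;
		p++;
	}
	return 0;
}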
986/1434
987/1435 /*
988/1436  * Move into place and start executing a preloaded standalone
989/1437  * executable.  If nothing was preloaded return an error.
990/1438  */
991/1439 int kernel_kexec(void)
992/1440 {
993/1441 	int error = 0;
994/1442
<< 995 	if (!kexec_trylock())
>> 1443 	if (!mutex_trylock(&kexec_mutex))
996/1444 		return -EBUSY;
997/1445 	if (!kexec_image) {
998/1446 		error = -EINVAL;
999/1447 		goto Unlock;
1000/1448 	}
1001/1449
1002/1450 #ifdef CONFIG_KEXEC_JUMP
1003/1451 	if (kexec_image->preserve_context) {
>> 1452 		lock_system_sleep();
1004/1453 		pm_prepare_console();
1005/1454 		error = freeze_processes();
1006/1455 		if (error) {
1007/1456 			error = -EBUSY;
1008/1457 			goto Restore_console;
1009/1458 		}
1010/1459 		suspend_console();
1011/1460 		error = dpm_suspend_start(PMSG_FREEZE);
1012/1461 		if (error)
1013/1462 			goto Resume_console;
1014/1463 		/* At this point, dpm_suspend_start() has been called,
1015/1464 		 * but *not* dpm_suspend_end(). We *must* call
1016/1465 		 * dpm_suspend_end() now.  Otherwise, drivers for
1017/1466 		 * some devices (e.g. interrupt controllers) become
1018/1467 		 * desynchronized with the actual state of the
1019/1468 		 * hardware at resume time, and evil weirdness ensues.
1020/1469 		 */
1021/1470 		error = dpm_suspend_end(PMSG_FREEZE);
1022/1471 		if (error)
1023/1472 			goto Resume_devices;
<< 1024 		error = suspend_disable_secondary_cpus();
>> 1473 		error = disable_nonboot_cpus();
1025/1474 		if (error)
1026/1475 			goto Enable_cpus;
1027/1476 		local_irq_disable();
1028/1477 		error = syscore_suspend();
1029/1478 		if (error)
1030/1479 			goto Enable_irqs;
1031/1480 	} else
1032/1481 #endif
1033/1482 	{
1034/1483 		kexec_in_progress = true;
<< 1035 		kernel_restart_prepare("kexec reboot");
>> 1484 		kernel_restart_prepare(NULL);
1036/1485 		migrate_to_reboot_cpu();
<< 1037 		syscore_shutdown();
1038/1486
1039/1487 		/*
1040/1488 		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1041/1489 		 * no further code needs to use CPU hotplug (which is true in
1042/1490 		 * the reboot case). However, the kexec path depends on using
1043/1491 		 * CPU hotplug again; so re-enable it here.
1044/1492 		 */
1045/1493 		cpu_hotplug_enable();
<< 1046 		pr_notice("Starting new kernel\n");
>> 1494 		pr_emerg("Starting new kernel\n");
1047/1495 		machine_shutdown();
1048/1496 	}
1049/1497
<< 1050 	kmsg_dump(KMSG_DUMP_SHUTDOWN);
1051/1498 	machine_kexec(kexec_image);
1052/1499
1053/1500 #ifdef CONFIG_KEXEC_JUMP
1054/1501 	if (kexec_image->preserve_context) {
1055/1502 		syscore_resume();
1056/1503  Enable_irqs:
1057/1504 		local_irq_enable();
1058/1505  Enable_cpus:
<< 1059 		suspend_enable_secondary_cpus();
>> 1506 		enable_nonboot_cpus();
1060/1507 		dpm_resume_start(PMSG_RESTORE);
1061/1508  Resume_devices:
1062/1509 		dpm_resume_end(PMSG_RESTORE);
1063/1510  Resume_console:
1064/1511 		resume_console();
1065/1512 		thaw_processes();
1066/1513  Restore_console:
1067/1514 		pm_restore_console();
>> 1515 		unlock_system_sleep();
1068/1516 	}
1069/1517 #endif
1070/1518
1071/1519  Unlock:
<< 1072 	kexec_unlock();
>> 1520 	mutex_unlock(&kexec_mutex);
1073/1521 	return error;
1074/1522 }
>> 1523
>> 1524 /*
>> 1525  * Add and remove page tables for crashkernel memory
>> 1526  *
>> 1527  * Provide an empty default implementation here -- architecture
>> 1528  * code may override this.
>> 1529  */
>> 1530 void __weak crash_map_reserved_pages(void)
>> 1531 {}
>> 1532
>> 1533 void __weak crash_unmap_reserved_pages(void)
>> 1534 {}
1075/1535
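For completeness, the user-space path into kernel_kexec(): once an image has been staged with kexec_load(2) or kexec_file_load(2) (in practice, `kexec -l /path/to/kernel`), the reboot(2) call below is what reaches this function. This is a minimal sketch; run it as root, and only on a machine you actually intend to reboot (`kexec -e` does effectively the same thing).

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	/* LINUX_REBOOT_CMD_KEXEC fails with EINVAL if no image is loaded. */
	long ret = syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
			   LINUX_REBOOT_CMD_KEXEC, NULL);
	if (ret)
		perror("reboot(LINUX_REBOOT_CMD_KEXEC)");
	return ret ? 1 : 0;
}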