// SPDX-License-Identifier: GPL-2.0-only
/*
 * RAM Oops/Panic logger
 *
 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
 * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pstore.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mm.h>

#include "internal.h"
#include "ram_internal.h"

#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL

static ulong record_size = MIN_MEM_SIZE;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"size of each dump done on oops/panic");

static ulong ramoops_console_size = MIN_MEM_SIZE;
module_param_named(console_size, ramoops_console_size, ulong, 0400);
MODULE_PARM_DESC(console_size, "size of kernel console log");

static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");

static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
MODULE_PARM_DESC(pmsg_size, "size of user space message log");

static unsigned long long mem_address;
module_param_hw(mem_address, ullong, other, 0400);
MODULE_PARM_DESC(mem_address,
		"start of reserved RAM used to store oops/panic logs");

static char *mem_name;
module_param_named(mem_name, mem_name, charp, 0400);
MODULE_PARM_DESC(mem_name, "name of kernel param that holds addr");

static ulong mem_size;
module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
		"size of reserved RAM used to store oops/panic logs");

static unsigned int mem_type;
module_param(mem_type, uint, 0400);
MODULE_PARM_DESC(mem_type,
		"memory type: 0=write-combined (default), 1=unbuffered, 2=cached");

static int ramoops_max_reason = -1;
module_param_named(max_reason, ramoops_max_reason, int, 0400);
MODULE_PARM_DESC(max_reason,
		"maximum reason for kmsg dump (default 2: Oops and Panic)");

static int ramoops_ecc;
module_param_named(ecc, ramoops_ecc, int, 0400);
MODULE_PARM_DESC(ecc,
		"if non-zero, the option enables ECC support and specifies "
		"ECC buffer size in bytes (1 is a special value, means 16 "
		"bytes ECC)");

static int ramoops_dump_oops = -1;
module_param_named(dump_oops, ramoops_dump_oops, int, 0400);
MODULE_PARM_DESC(dump_oops,
		"(deprecated: use max_reason instead) set to 1 to dump oopses & panics, 0 to only dump panics");
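
/*
 * Example usage of the module parameters (illustrative values only; see
 * Documentation/admin-guide/ramoops.rst for the authoritative examples):
 *
 *   ramoops.mem_address=0x8000000 ramoops.mem_size=0x100000 ramoops.ecc=1
 *
 * or, using a region reserved by name via the "reserve_mem" command line
 * option:
 *
 *   reserve_mem=2M:4096:oops ramoops.mem_name=oops
 */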

struct ramoops_context {
	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
	struct persistent_ram_zone *cprz;	/* Console zone */
	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
	struct persistent_ram_zone *mprz;	/* PMSG zone */
	phys_addr_t phys_addr;
	unsigned long size;
	unsigned int memtype;
	size_t record_size;
	size_t console_size;
	size_t ftrace_size;
	size_t pmsg_size;
	u32 flags;
	struct persistent_ram_ecc_info ecc_info;
	unsigned int max_dump_cnt;
	unsigned int dump_write_cnt;
	/* The *_read_cnt values need to be cleared in ramoops_pstore_open() */
	unsigned int dump_read_cnt;
	unsigned int console_read_cnt;
	unsigned int max_ftrace_cnt;
	unsigned int ftrace_read_cnt;
	unsigned int pmsg_read_cnt;
	struct pstore_info pstore;
};

static struct platform_device *dummy;

static int ramoops_pstore_open(struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;

	cxt->dump_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	return 0;
}

static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
		     struct pstore_record *record)
{
	struct persistent_ram_zone *prz;

	/* Give up if we never existed or have hit the end. */
	if (!przs)
		return NULL;

	prz = przs[id];
	if (!prz)
		return NULL;

	/* Update old/shadowed buffer. */
	if (prz->type == PSTORE_TYPE_DMESG)
		persistent_ram_save_old(prz);

	if (!persistent_ram_old_size(prz))
		return NULL;

	record->type = prz->type;
	record->id = id;

	return prz;
}

static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
				 bool *compressed)
{
	char data_type;
	int header_length = 0;

	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
		   &header_length) == 3) {
		time->tv_nsec *= 1000;
		if (data_type == 'C')
			*compressed = true;
		else
			*compressed = false;
	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
			  (time64_t *)&time->tv_sec, &time->tv_nsec,
			  &header_length) == 2) {
		time->tv_nsec *= 1000;
		*compressed = false;
	} else {
		time->tv_sec = 0;
		time->tv_nsec = 0;
		*compressed = false;
	}
	return header_length;
}

static bool prz_ok(struct persistent_ram_zone *prz)
{
	return !!prz && !!(persistent_ram_old_size(prz) +
			   persistent_ram_ecc_string(prz, NULL, 0));
}

static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
	ssize_t size = 0;
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz = NULL;
	int header_length = 0;
	bool free_prz = false;

	/*
	 * Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
	 * valid time stamps, so it is initialized to zero.
	 */
	record->time.tv_sec = 0;
	record->time.tv_nsec = 0;
	record->compressed = false;

	/* Find the next valid persistent_ram_zone for DMESG */
	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
		prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
					   record);
		if (!prz_ok(prz))
			continue;
		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
						      &record->time,
						      &record->compressed);
		/* Clear and skip this DMESG record if it has no valid header */
		if (!header_length) {
			persistent_ram_free_old(prz);
			persistent_ram_zap(prz);
			prz = NULL;
		}
	}

	if (!prz_ok(prz) && !cxt->console_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);

	/* ftrace is last since it may want to dynamically allocate memory. */
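	/*
	 * Note: pstore keeps calling ->read() until it returns 0 or an
	 * error, so the cxt->*_read_cnt counters act as cursors across
	 * those calls: dmesg zones are returned first, then console,
	 * then pmsg, and finally the ftrace zones below.
	 */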
	if (!prz_ok(prz)) {
		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
		    !cxt->ftrace_read_cnt++) {
			prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
						   record);
		} else {
			/*
			 * Build a new dummy record which combines all the
			 * per-cpu records including metadata and ecc info.
			 */
			struct persistent_ram_zone *tmp_prz, *prz_next;

			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
					  GFP_KERNEL);
			if (!tmp_prz)
				return -ENOMEM;
			prz = tmp_prz;
			free_prz = true;

			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
				prz_next = ramoops_get_next_prz(cxt->fprzs,
						cxt->ftrace_read_cnt++, record);

				if (!prz_ok(prz_next))
					continue;

				tmp_prz->ecc_info = prz_next->ecc_info;
				tmp_prz->corrected_bytes +=
						prz_next->corrected_bytes;
				tmp_prz->bad_blocks += prz_next->bad_blocks;

				size = pstore_ftrace_combine_log(
						&tmp_prz->old_log,
						&tmp_prz->old_log_size,
						prz_next->old_log,
						prz_next->old_log_size);
				if (size)
					goto out;
			}
			record->id = 0;
		}
	}

	if (!prz_ok(prz)) {
		size = 0;
		goto out;
	}

	size = persistent_ram_old_size(prz) - header_length;

	/* ECC correction notice */
	record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);

	record->buf = kvzalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
	if (record->buf == NULL) {
		size = -ENOMEM;
		goto out;
	}

	memcpy(record->buf, (char *)persistent_ram_old(prz) + header_length,
	       size);

	persistent_ram_ecc_string(prz, record->buf + size,
				  record->ecc_notice_size + 1);

out:
	if (free_prz) {
		kvfree(prz->old_log);
		kfree(prz);
	}

	return size;
}

static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
				     struct pstore_record *record)
{
	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
	size_t len;

	len = scnprintf(hdr, sizeof(hdr),
			RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
			(time64_t)record->time.tv_sec,
			record->time.tv_nsec / 1000,
			record->compressed ? 'C' : 'D');
	persistent_ram_write(prz, hdr, len);

	return len;
}

static int notrace ramoops_pstore_write(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;
	size_t size, hlen;

	if (record->type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, record->buf, record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone based on whether we're using per-cpu
		 * buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], record->buf,
				     record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	}

	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/*
	 * We could filter on record->reason here if we wanted to (which
	 * would duplicate what happened before the "max_reason" setting
	 * was added), but that would defeat the purpose of a system
	 * changing printk.always_kmsg_dump, so instead log everything that
	 * the kmsg dumper sends us, since it should be doing the filtering
	 * based on the combination of printk.always_kmsg_dump and our
	 * requested "max_reason".
	 */

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	/*
	 * Since this is a new crash dump, we need to reset the buffer in
	 * case it still has an old dump present. Without this, the new dump
	 * will get appended, which would seriously confuse anything trying
	 * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
	 * expects to find a dump header at the beginning of buffer data, so
	 * we must reset the buffer values in order to ensure that the
	 * header will be written to the beginning of the buffer.
	 */
	persistent_ram_zap(prz);

	/* Build header and append record contents. */
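	/*
	 * The header written here looks like "====1700000000.123456-D"
	 * (timestamp value illustrative): 'C' marks a compressed record
	 * and 'D' an uncompressed one, matching what
	 * ramoops_read_kmsg_hdr() parses on the next boot.
	 */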
	hlen = ramoops_write_kmsg_hdr(prz, record);
	if (!hlen)
		return -ENOMEM;

	size = record->size;
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, record->buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}

static int notrace ramoops_pstore_write_user(struct pstore_record *record,
					     const char __user *buf)
{
	if (record->type == PSTORE_TYPE_PMSG) {
		struct ramoops_context *cxt = record->psi->data;

		if (!cxt->mprz)
			return -ENOMEM;
		return persistent_ram_write_user(cxt->mprz, buf, record->size);
	}

	return -EINVAL;
}

static int ramoops_pstore_erase(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->max_dump_cnt)
			return -EINVAL;
		prz = cxt->dprzs[record->id];
		break;
	case PSTORE_TYPE_CONSOLE:
		prz = cxt->cprz;
		break;
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->max_ftrace_cnt)
			return -EINVAL;
		prz = cxt->fprzs[record->id];
		break;
	case PSTORE_TYPE_PMSG:
		prz = cxt->mprz;
		break;
	default:
		return -EINVAL;
	}

	persistent_ram_free_old(prz);
	persistent_ram_zap(prz);

	return 0;
}

static struct ramoops_context oops_cxt = {
	.pstore = {
		.owner = THIS_MODULE,
		.name = "ramoops",
		.open = ramoops_pstore_open,
		.read = ramoops_pstore_read,
		.write = ramoops_pstore_write,
		.write_user = ramoops_pstore_write_user,
		.erase = ramoops_pstore_erase,
	},
};

static void ramoops_free_przs(struct ramoops_context *cxt)
{
	int i;

	/* Free pmsg PRZ */
	persistent_ram_free(&cxt->mprz);

	/* Free console PRZ */
	persistent_ram_free(&cxt->cprz);

	/* Free dump PRZs */
	if (cxt->dprzs) {
		for (i = 0; i < cxt->max_dump_cnt; i++)
			persistent_ram_free(&cxt->dprzs[i]);

		kfree(cxt->dprzs);
		cxt->dprzs = NULL;
		cxt->max_dump_cnt = 0;
	}

	/* Free ftrace PRZs */
	if (cxt->fprzs) {
		for (i = 0; i < cxt->max_ftrace_cnt; i++)
			persistent_ram_free(&cxt->fprzs[i]);
		kfree(cxt->fprzs);
		cxt->fprzs = NULL;
		cxt->max_ftrace_cnt = 0;
	}
}

static int ramoops_init_przs(const char *name,
			     struct device *dev, struct ramoops_context *cxt,
			     struct persistent_ram_zone ***przs,
			     phys_addr_t *paddr, size_t mem_sz,
			     ssize_t record_size,
			     unsigned int *cnt, u32 sig, u32 flags)
{
	int err = -ENOMEM;
	int i;
	size_t zone_sz;
	struct persistent_ram_zone **prz_ar;

	/* Allocate nothing for 0 mem_sz or 0 record_size. */
	if (mem_sz == 0 || record_size == 0) {
		*cnt = 0;
		return 0;
	}

	/*
	 * If we have a negative record size, calculate it based on
	 * mem_sz / *cnt. If we have a positive record size, calculate
	 * cnt from mem_sz / record_size.
	 */
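	/*
	 * For example, ramoops_probe() sets up the ftrace area with
	 * record_size == -1 and *cnt == nr_cpu_ids when per-CPU buffers
	 * are enabled, so the region is split evenly into one zone per CPU.
	 */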
	if (record_size < 0) {
		if (*cnt == 0)
			return 0;
		record_size = mem_sz / *cnt;
		if (record_size == 0) {
			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
				name, mem_sz, *cnt);
			goto fail;
		}
	} else {
		*cnt = mem_sz / record_size;
		if (*cnt == 0) {
			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
				name, mem_sz, record_size);
			goto fail;
		}
	}

	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name,
			mem_sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		goto fail;
	}

	zone_sz = mem_sz / *cnt;
	zone_sz = ALIGN_DOWN(zone_sz, 2);
	if (!zone_sz) {
		dev_err(dev, "%s zone size == 0\n", name);
		goto fail;
	}

	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
	if (!prz_ar)
		goto fail;

	for (i = 0; i < *cnt; i++) {
		char *label;

		if (*cnt == 1)
			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
		else
			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
					  name, i, *cnt - 1);
		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
					       &cxt->ecc_info,
					       cxt->memtype, flags, label);
		kfree(label);
		if (IS_ERR(prz_ar[i])) {
			err = PTR_ERR(prz_ar[i]);
			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
				name, record_size,
				(unsigned long long)*paddr, err);

			while (i > 0) {
				i--;
				persistent_ram_free(&prz_ar[i]);
			}
			kfree(prz_ar);
			prz_ar = NULL;
			goto fail;
		}
		*paddr += zone_sz;
		prz_ar[i]->type = pstore_name_to_type(name);
	}

	*przs = prz_ar;
	return 0;

fail:
	*cnt = 0;
	return err;
}

static int ramoops_init_prz(const char *name,
			    struct device *dev, struct ramoops_context *cxt,
			    struct persistent_ram_zone **prz,
			    phys_addr_t *paddr, size_t sz, u32 sig)
{
	char *label;

	if (!sz)
		return 0;

	if (*paddr + sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name, sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		return -ENOMEM;
	}

	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
	kfree(label);
	if (IS_ERR(*prz)) {
		int err = PTR_ERR(*prz);

		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
			name, sz, (unsigned long long)*paddr, err);
		return err;
	}

	*paddr += sz;
	(*prz)->type = pstore_name_to_type(name);

	return 0;
}

/* Read a u32 from a dt property and make sure it's safe for an int. */
static int ramoops_parse_dt_u32(struct platform_device *pdev,
				const char *propname,
				u32 default_value, u32 *value)
{
	u32 val32 = 0;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
	if (ret == -EINVAL) {
		/* field is missing, use default value. */
		val32 = default_value;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
			propname, ret);
		return ret;
	}

	/* Sanity check our results. */
	if (val32 > INT_MAX) {
		dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
		return -EOVERFLOW;
	}

	*value = val32;
	return 0;
}
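
/*
 * An illustrative /reserved-memory node that this parser would accept
 * (values are examples only; see the ramoops devicetree binding for the
 * authoritative schema):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		ramoops@8f000000 {
 *			compatible = "ramoops";
 *			reg = <0 0x8f000000 0 0x100000>;
 *			record-size = <0x4000>;
 *			console-size = <0x4000>;
 *		};
 *	};
 */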

static int ramoops_parse_dt(struct platform_device *pdev,
			    struct ramoops_platform_data *pdata)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *parent_node;
	struct resource *res;
	u32 value;
	int ret;

	dev_dbg(&pdev->dev, "using Device Tree\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"failed to locate DT /reserved-memory resource\n");
		return -EINVAL;
	}

	pdata->mem_size = resource_size(res);
	pdata->mem_address = res->start;
	/*
	 * Setting "unbuffered" is deprecated and will be ignored if
	 * "mem_type" is also specified.
	 */
	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
	/*
	 * Setting "no-dump-oops" is deprecated and will be ignored if
	 * "max_reason" is also specified.
	 */
	if (of_property_read_bool(of_node, "no-dump-oops"))
		pdata->max_reason = KMSG_DUMP_PANIC;
	else
		pdata->max_reason = KMSG_DUMP_OOPS;

#define parse_u32(name, field, default_value) {				\
		ret = ramoops_parse_dt_u32(pdev, name, default_value,	\
					   &value);			\
		if (ret < 0)						\
			return ret;					\
		field = value;						\
	}

	parse_u32("mem-type", pdata->mem_type, pdata->mem_type);
	parse_u32("record-size", pdata->record_size, 0);
	parse_u32("console-size", pdata->console_size, 0);
	parse_u32("ftrace-size", pdata->ftrace_size, 0);
	parse_u32("pmsg-size", pdata->pmsg_size, 0);
	parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0);
	parse_u32("flags", pdata->flags, 0);
	parse_u32("max-reason", pdata->max_reason, pdata->max_reason);

#undef parse_u32

	/*
	 * Some old Chromebooks relied on the kernel setting the
	 * console_size and pmsg_size to the record size since that's
	 * what the downstream kernel did. These same Chromebooks had
	 * "ramoops" straight under the root node which isn't
	 * according to the current upstream bindings (though it was
	 * arguably acceptable under a prior version of the bindings).
	 * Let's make those old Chromebooks work by detecting that
	 * we're not a child of "reserved-memory" and mimicking the
	 * expected behavior.
	 */
	parent_node = of_get_parent(of_node);
	if (!of_node_name_eq(parent_node, "reserved-memory") &&
	    !pdata->console_size && !pdata->ftrace_size &&
	    !pdata->pmsg_size && !pdata->ecc_info.ecc_size) {
		pdata->console_size = pdata->record_size;
		pdata->pmsg_size = pdata->record_size;
	}
	of_node_put(parent_node);

	return 0;
}

static int ramoops_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ramoops_platform_data *pdata = dev->platform_data;
	struct ramoops_platform_data pdata_local;
	struct ramoops_context *cxt = &oops_cxt;
	size_t dump_mem_sz;
	phys_addr_t paddr;
	int err = -EINVAL;

	/*
	 * Only a single ramoops area allowed at a time, so fail extra
	 * probes.
	 */
	if (cxt->max_dump_cnt) {
		pr_err("already initialized\n");
		goto fail_out;
	}

	if (dev_of_node(dev) && !pdata) {
		pdata = &pdata_local;
		memset(pdata, 0, sizeof(*pdata));

		err = ramoops_parse_dt(pdev, pdata);
		if (err < 0)
			goto fail_out;
	}

	/* Make sure we didn't get a bogus platform data pointer. */
	if (!pdata) {
		pr_err("NULL platform data\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
			!pdata->ftrace_size && !pdata->pmsg_size)) {
		pr_err("The memory size and the record/console size must be "
			"non-zero\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (pdata->record_size && !is_power_of_2(pdata->record_size))
		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
	if (pdata->console_size && !is_power_of_2(pdata->console_size))
		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);

	cxt->size = pdata->mem_size;
	cxt->phys_addr = pdata->mem_address;
	cxt->memtype = pdata->mem_type;
	cxt->record_size = pdata->record_size;
	cxt->console_size = pdata->console_size;
	cxt->ftrace_size = pdata->ftrace_size;
	cxt->pmsg_size = pdata->pmsg_size;
	cxt->flags = pdata->flags;
	cxt->ecc_info = pdata->ecc_info;

	paddr = cxt->phys_addr;

	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
			- cxt->pmsg_size;
	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
				dump_mem_sz, cxt->record_size,
				&cxt->max_dump_cnt, 0, 0);
	if (err)
		goto fail_init;

	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
			       cxt->console_size, 0);
	if (err)
		goto fail_init;

	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
			       cxt->pmsg_size, 0);
	if (err)
		goto fail_init;

	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
				? nr_cpu_ids
				: 1;
	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
				cxt->ftrace_size, -1,
				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
					? PRZ_FLAG_NO_LOCK : 0);
	if (err)
		goto fail_init;

	cxt->pstore.data = cxt;
	/*
	 * Prepare frontend flags based on which areas are initialized.
	 * For ramoops_init_przs() cases, the "max count" variable tells
	 * if there are regions present. For ramoops_init_prz() cases,
	 * the single region size is how to check.
	 */
	cxt->pstore.flags = 0;
	if (cxt->max_dump_cnt) {
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
		cxt->pstore.max_reason = pdata->max_reason;
	}
	if (cxt->console_size)
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
	if (cxt->max_ftrace_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
	if (cxt->pmsg_size)
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;

	/*
	 * Since bufsize is only used for dmesg crash dumps, it
	 * must match the size of the dprz record (after PRZ header
	 * and ECC bytes have been accounted for).
	 */
	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
		cxt->pstore.buf = kvzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			pr_err("cannot allocate pstore crash dump buffer\n");
			err = -ENOMEM;
			goto fail_clear;
		}
	}

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_buf;
	}

	/*
	 * Update the module parameter variables as well so they are visible
	 * through /sys/module/ramoops/parameters/
	 */
	mem_size = pdata->mem_size;
	mem_address = pdata->mem_address;
	record_size = pdata->record_size;
	ramoops_max_reason = pdata->max_reason;
	ramoops_console_size = pdata->console_size;
	ramoops_pmsg_size = pdata->pmsg_size;
	ramoops_ftrace_size = pdata->ftrace_size;

	pr_info("using 0x%lx@0x%llx, ecc: %d\n",
		cxt->size, (unsigned long long)cxt->phys_addr,
		cxt->ecc_info.ecc_size);

	return 0;

fail_buf:
	kvfree(cxt->pstore.buf);
fail_clear:
	cxt->pstore.bufsize = 0;
fail_init:
	ramoops_free_przs(cxt);
fail_out:
	return err;
}

static void ramoops_remove(struct platform_device *pdev)
{
	struct ramoops_context *cxt = &oops_cxt;

	pstore_unregister(&cxt->pstore);

	kvfree(cxt->pstore.buf);
	cxt->pstore.bufsize = 0;

	ramoops_free_przs(cxt);
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "ramoops" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver ramoops_driver = {
	.probe = ramoops_probe,
	.remove_new = ramoops_remove,
	.driver = {
		.name = "ramoops",
		.of_match_table = dt_match,
	},
};

static inline void ramoops_unregister_dummy(void)
{
	platform_device_unregister(dummy);
	dummy = NULL;
}

static void __init ramoops_register_dummy(void)
{
	struct ramoops_platform_data pdata;

	if (mem_name) {
		phys_addr_t start;
		phys_addr_t size;

		if (reserve_mem_find_by_name(mem_name, &start, &size)) {
			mem_address = start;
			mem_size = size;
		}
	}

	/*
	 * Prepare a dummy platform data structure to carry the module
	 * parameters. If mem_size isn't set, then there are no module
	 * parameters, and we can skip this.
	 */
	if (!mem_size)
		return;

	pr_info("using module parameters\n");

	memset(&pdata, 0, sizeof(pdata));
	pdata.mem_size = mem_size;
	pdata.mem_address = mem_address;
	pdata.mem_type = mem_type;
	pdata.record_size = record_size;
	pdata.console_size = ramoops_console_size;
	pdata.ftrace_size = ramoops_ftrace_size;
	pdata.pmsg_size = ramoops_pmsg_size;
	/* If "max_reason" is set, its value has priority over "dump_oops". */
	if (ramoops_max_reason >= 0)
		pdata.max_reason = ramoops_max_reason;
	/* Otherwise, if "dump_oops" is set, parse it into "max_reason". */
	else if (ramoops_dump_oops != -1)
		pdata.max_reason = ramoops_dump_oops ? KMSG_DUMP_OOPS
						     : KMSG_DUMP_PANIC;
	/* And if neither are explicitly set, use the default. */
	else
		pdata.max_reason = KMSG_DUMP_OOPS;
	pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;

	/*
	 * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
	 * (using 1 byte for ECC isn't much of use anyway).
	 */
	pdata.ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;
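
	/*
	 * The "ramoops" dummy device registered below matches the platform
	 * driver by name, so the module-parameter path ends up in
	 * ramoops_probe() just like the device tree and platform data paths.
	 */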

	dummy = platform_device_register_data(NULL, "ramoops", -1,
			&pdata, sizeof(pdata));
	if (IS_ERR(dummy)) {
		pr_info("could not create platform device: %ld\n",
			PTR_ERR(dummy));
		dummy = NULL;
	}
}

static int __init ramoops_init(void)
{
	int ret;

	ramoops_register_dummy();
	ret = platform_driver_register(&ramoops_driver);
	if (ret != 0)
		ramoops_unregister_dummy();

	return ret;
}
postcore_initcall(ramoops_init);

static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	ramoops_unregister_dummy();
}
module_exit(ramoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");