TOMOYO Linux Cross Reference
Linux/arch/powerpc/platforms/pseries/papr-vpd.c

// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "papr-vpd: " fmt

#include <linux/anon_inodes.h>
#include <linux/build_bug.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/papr-vpd.h>
#include <asm/rtas-work-area.h>
#include <asm/rtas.h>
#include <uapi/asm/papr-vpd.h>

/*
 * Function-specific return values for ibm,get-vpd, derived from PAPR+
 * v2.13 7.3.20 "ibm,get-vpd RTAS Call".
 */
#define RTAS_IBM_GET_VPD_COMPLETE    0 /* All VPD has been retrieved. */
#define RTAS_IBM_GET_VPD_MORE_DATA   1 /* More VPD is available. */
#define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */
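
/*
 * Illustrative note on the protocol implied by the values above: a
 * successful retrieval yields a status progression such as
 * 1, 1, ..., 0 (MORE_DATA until COMPLETE), while a -4 (START_OVER) at
 * any point means the VPD changed mid-sequence and the caller must
 * restart from sequence number 1.
 */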

/**
 * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd.
 * @loc_code:  In: Caller-provided location code buffer. Must be RTAS-addressable.
 * @work_area: In: Caller-provided work area buffer for results.
 * @sequence:  In: Sequence number. Out: Next sequence number.
 * @written:   Out: Bytes written by ibm,get-vpd to @work_area.
 * @status:    Out: RTAS call status.
 */
struct rtas_ibm_get_vpd_params {
        const struct papr_location_code *loc_code;
        struct rtas_work_area *work_area;
        u32 sequence;
        u32 written;
        s32 status;
};

/**
 * rtas_ibm_get_vpd() - Call ibm,get-vpd to fill a work area buffer.
 * @params: See &struct rtas_ibm_get_vpd_params.
 *
 * Calls ibm,get-vpd until it errors or successfully deposits data
 * into the supplied work area. Handles RTAS retry statuses. Maps RTAS
 * error statuses to reasonable errno values.
 *
 * The caller is expected to invoke rtas_ibm_get_vpd() multiple times
 * to retrieve all the VPD for the provided location code. Only one
 * sequence should be in progress at any time; starting a new sequence
 * will disrupt any sequence already in progress. Serialization of VPD
 * retrieval sequences is the responsibility of the caller.
 *
 * The caller should inspect @params.status to determine whether more
 * calls are needed to complete the sequence.
 *
 * Context: May sleep. The caller must hold rtas_ibm_get_vpd_lock, as
 *          asserted below.
 * Return: -ve on error, 0 otherwise.
 */
static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params)
{
        const struct papr_location_code *loc_code = params->loc_code;
        struct rtas_work_area *work_area = params->work_area;
        u32 rets[2];
        s32 fwrc;
        int ret;

        lockdep_assert_held(&rtas_ibm_get_vpd_lock);

        do {
                fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_GET_VPD), 4, 3,
                                 rets,
                                 __pa(loc_code),
                                 rtas_work_area_phys(work_area),
                                 rtas_work_area_size(work_area),
                                 params->sequence);
        } while (rtas_busy_delay(fwrc));

        switch (fwrc) {
        case RTAS_HARDWARE_ERROR:
                ret = -EIO;
                break;
        case RTAS_INVALID_PARAMETER:
                ret = -EINVAL;
                break;
        case RTAS_IBM_GET_VPD_START_OVER:
                ret = -EAGAIN;
                break;
        case RTAS_IBM_GET_VPD_MORE_DATA:
                params->sequence = rets[0];
                fallthrough;
        case RTAS_IBM_GET_VPD_COMPLETE:
                params->written = rets[1];
                /*
                 * Kernel or firmware bug, do not continue.
                 */
                if (WARN(params->written > rtas_work_area_size(work_area),
                         "possible write beyond end of work area"))
                        ret = -EFAULT;
                else
                        ret = 0;
                break;
        default:
                ret = -EIO;
                pr_err_ratelimited("unexpected ibm,get-vpd status %d\n", fwrc);
                break;
        }

        params->status = fwrc;
        return ret;
}
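
/*
 * A minimal usage sketch (illustrative only; the variable names are
 * hypothetical, and the driver's real loop is built from the
 * vpd_sequence_* helpers below):
 *
 *	// with rtas_ibm_get_vpd_lock held
 *	struct rtas_ibm_get_vpd_params p = {
 *		.loc_code = &loc,	// must be RTAS-addressable
 *		.work_area = rtas_work_area_alloc(SZ_4K),
 *		.sequence = 1,		// sequences start at 1
 *	};
 *	int err;
 *
 *	do {
 *		err = rtas_ibm_get_vpd(&p);
 *		// on success, consume p.written bytes from the work area
 *	} while (!err && p.status == RTAS_IBM_GET_VPD_MORE_DATA);
 */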

/*
 * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into
 * an immutable buffer to be attached to a file descriptor.
 */
struct vpd_blob {
        const char *data;
        size_t len;
};

static bool vpd_blob_has_data(const struct vpd_blob *blob)
{
        return blob->data && blob->len;
}

static void vpd_blob_free(const struct vpd_blob *blob)
{
        if (blob) {
                kvfree(blob->data);
                kfree(blob);
        }
}

/**
 * vpd_blob_extend() - Append data to a &struct vpd_blob.
 * @blob: The blob to extend.
 * @data: The new data to append to @blob.
 * @len:  The length of @data.
 *
 * Context: May sleep.
 * Return: -ENOMEM on allocation failure, 0 otherwise.
 */
static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len)
{
        const size_t new_len = blob->len + len;
        const size_t old_len = blob->len;
        const char *old_ptr = blob->data;
        char *new_ptr;

        new_ptr = old_ptr ?
                kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) :
                kvmalloc(len, GFP_KERNEL_ACCOUNT);

        if (!new_ptr)
                return -ENOMEM;

        memcpy(&new_ptr[old_len], data, len);
        blob->data = new_ptr;
        blob->len = new_len;
        return 0;
}

/**
 * vpd_blob_generate() - Construct a new &struct vpd_blob.
 * @generator: Function that supplies the blob data.
 * @arg:       Context pointer supplied by caller, passed to @generator.
 *
 * The @generator callback is invoked until it returns NULL. @arg is
 * passed to @generator in its first argument on each call. When
 * @generator returns data, it should store the data length in its
 * second argument.
 *
 * Context: May sleep.
 * Return: A completely populated &struct vpd_blob, or NULL on error.
 */
static const struct vpd_blob *
vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg)
{
        struct vpd_blob *blob;
        const char *buf;
        size_t len;
        int err = 0;

        blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT);
        if (!blob)
                return NULL;

        while (err == 0 && (buf = generator(arg, &len)))
                err = vpd_blob_extend(blob, buf, len);

        if (err != 0 || !vpd_blob_has_data(blob))
                goto free_blob;

        return blob;
free_blob:
        vpd_blob_free(blob);
        return NULL;
}
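
/*
 * A sketch of a conforming @generator (hypothetical; the driver's
 * real generator is vpd_sequence_fill_work_area() below). It yields
 * one chunk, then NULL to terminate:
 *
 *	static const char *one_shot_generator(void *arg, size_t *len)
 *	{
 *		bool *done = arg;
 *
 *		if (*done)
 *			return NULL;
 *		*done = true;
 *		*len = strlen("example");
 *		return "example";
 *	}
 */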

/*
 * Internal VPD sequence APIs. A VPD sequence is a series of calls to
 * ibm,get-vpd for a given location code. The sequence ends when an
 * error is encountered or all VPD for the location code has been
 * returned.
 */

/**
 * struct vpd_sequence - State for managing a VPD sequence.
 * @error:  Shall be zero as long as the sequence has not encountered an error,
 *          -ve errno otherwise. Use vpd_sequence_set_err() to update this.
 * @params: Parameter block to pass to rtas_ibm_get_vpd().
 */
struct vpd_sequence {
        int error;
        struct rtas_ibm_get_vpd_params params;
};

/**
 * vpd_sequence_begin() - Begin a VPD retrieval sequence.
 * @seq:      Uninitialized sequence state.
 * @loc_code: Location code that defines the scope of the VPD to return.
 *
 * Initializes @seq with the resources necessary to carry out a VPD
 * sequence. Callers must pass @seq to vpd_sequence_end() regardless
 * of whether the sequence succeeds.
 *
 * Context: May sleep.
 */
static void vpd_sequence_begin(struct vpd_sequence *seq,
                               const struct papr_location_code *loc_code)
{
        /*
         * Use a static data structure for the location code passed to
         * RTAS to ensure it's in the RMA and avoid a separate work
         * area allocation. Guarded by the function lock.
         */
        static struct papr_location_code static_loc_code;

        /*
         * We could allocate the work area before acquiring the
         * function lock, but that would allow concurrent requests to
         * exhaust the limited work area pool for no benefit. So
         * allocate the work area under the lock.
         */
        mutex_lock(&rtas_ibm_get_vpd_lock);
        static_loc_code = *loc_code;
        *seq = (struct vpd_sequence) {
                .params = {
                        .work_area = rtas_work_area_alloc(SZ_4K),
                        .loc_code = &static_loc_code,
                        .sequence = 1,
                },
        };
}

/**
 * vpd_sequence_end() - Finalize a VPD retrieval sequence.
 * @seq: Sequence state.
 *
 * Releases resources obtained by vpd_sequence_begin().
 */
static void vpd_sequence_end(struct vpd_sequence *seq)
{
        rtas_work_area_free(seq->params.work_area);
        mutex_unlock(&rtas_ibm_get_vpd_lock);
}
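
/*
 * Typical pairing (a sketch; papr_vpd_run_sequence() below is the
 * real caller):
 *
 *	struct vpd_sequence seq;
 *
 *	vpd_sequence_begin(&seq, loc_code);
 *	// ... drive the sequence via vpd_sequence_fill_work_area() ...
 *	vpd_sequence_end(&seq);	// always, even on error
 */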

/**
 * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence
 *                              should stop.
 * @seq: VPD sequence state.
 *
 * Examines the sequence error state and outputs of the last call to
 * ibm,get-vpd to determine whether the sequence in progress should
 * continue or stop.
 *
 * Return: True if the sequence has encountered an error or if all VPD for
 *         this sequence has been retrieved. False otherwise.
 */
static bool vpd_sequence_should_stop(const struct vpd_sequence *seq)
{
        bool done;

        if (seq->error)
                return true;

        switch (seq->params.status) {
        case 0:
                if (seq->params.written == 0)
                        done = false; /* Initial state. */
                else
                        done = true; /* All data consumed. */
                break;
        case 1:
                done = false; /* More data available. */
                break;
        default:
                done = true; /* Error encountered. */
                break;
        }

        return done;
}

static int vpd_sequence_set_err(struct vpd_sequence *seq, int err)
{
        /* Preserve the first error recorded. */
        if (seq->error == 0)
                seq->error = err;

        return seq->error;
}

/*
 * Generator function to be passed to vpd_blob_generate().
 */
static const char *vpd_sequence_fill_work_area(void *arg, size_t *len)
{
        struct vpd_sequence *seq = arg;
        struct rtas_ibm_get_vpd_params *p = &seq->params;

        if (vpd_sequence_should_stop(seq))
                return NULL;
        if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p)))
                return NULL;
        *len = p->written;
        return rtas_work_area_raw_buf(p->work_area);
}

/*
 * Higher-level VPD retrieval code below. These functions use the
 * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based
 * VPD handles for consumption by user space.
 */

/**
 * papr_vpd_run_sequence() - Run a single VPD retrieval sequence.
 * @loc_code: Location code that defines the scope of VPD to return.
 *
 * Context: May sleep. Holds a mutex and an RTAS work area for its
 *          duration. Typically performs multiple sleepable slab
 *          allocations.
 *
 * Return: A populated &struct vpd_blob on success. Encoded error
 * pointer otherwise.
 */
static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code)
{
        const struct vpd_blob *blob;
        struct vpd_sequence seq;

        vpd_sequence_begin(&seq, loc_code);
        blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq);
        if (!blob)
                vpd_sequence_set_err(&seq, -ENOMEM);
        vpd_sequence_end(&seq);

        if (seq.error) {
                vpd_blob_free(blob);
                return ERR_PTR(seq.error);
        }

        return blob;
}

/**
 * papr_vpd_retrieve() - Return the VPD for a location code.
 * @loc_code: Location code that defines the scope of VPD to return.
 *
 * Run VPD sequences against @loc_code until a blob is successfully
 * instantiated, or a hard error is encountered, or a fatal signal is
 * pending.
 *
 * Context: May sleep.
 * Return: A fully populated VPD blob when successful. Encoded error
 * pointer otherwise.
 */
static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code)
{
        const struct vpd_blob *blob;

        /*
         * EAGAIN means the sequence errored with a -4 (VPD changed)
         * status from ibm,get-vpd, and we should attempt a new
         * sequence. PAPR+ v2.13 R1-7.3.20-5 indicates that this
         * should be a transient condition, not something that happens
         * continuously. But we'll stop trying on a fatal signal.
         */
        do {
                blob = papr_vpd_run_sequence(loc_code);
                if (!IS_ERR(blob)) /* Success. */
                        break;
                if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */
                        break;
                pr_info_ratelimited("VPD changed during retrieval, retrying\n");
                cond_resched();
        } while (!fatal_signal_pending(current));

        return blob;
}

static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off)
{
        const struct vpd_blob *blob = file->private_data;

        /* bug: we should not instantiate a handle without any data attached. */
        if (!vpd_blob_has_data(blob)) {
                pr_err_once("handle without data\n");
                return -EIO;
        }

        return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
}

static int papr_vpd_handle_release(struct inode *inode, struct file *file)
{
        const struct vpd_blob *blob = file->private_data;

        vpd_blob_free(blob);

        return 0;
}

static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence)
{
        const struct vpd_blob *blob = file->private_data;

        return fixed_size_llseek(file, off, whence, blob->len);
}

static const struct file_operations papr_vpd_handle_ops = {
        .read = papr_vpd_handle_read,
        .llseek = papr_vpd_handle_seek,
        .release = papr_vpd_handle_release,
};

/**
 * papr_vpd_create_handle() - Create a fd-based handle for reading VPD.
 * @ulc: Location code in user memory; defines the scope of the VPD to
 *       retrieve.
 *
 * Handler for PAPR_VPD_IOC_CREATE_HANDLE ioctl command. Validates
 * @ulc and instantiates an immutable VPD "blob" for it. The blob is
 * attached to a file descriptor for reading by user space. The memory
 * backing the blob is freed when the file is released.
 *
 * The entire requested VPD is retrieved by this call and all
 * necessary RTAS interactions are performed before returning the fd
 * to user space. This keeps the read handler simple and ensures that
 * the kernel can prevent interleaving of ibm,get-vpd call sequences.
 *
 * Return: The installed fd number if successful, -ve errno otherwise.
 */
static long papr_vpd_create_handle(struct papr_location_code __user *ulc)
{
        struct papr_location_code klc;
        const struct vpd_blob *blob;
        struct file *file;
        long err;
        int fd;

        if (copy_from_user(&klc, ulc, sizeof(klc)))
                return -EFAULT;

        if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str)))
                return -EINVAL;

        blob = papr_vpd_retrieve(&klc);
        if (IS_ERR(blob))
                return PTR_ERR(blob);

        fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                err = fd;
                goto free_blob;
        }

        file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops,
                                  (void *)blob, O_RDONLY);
        if (IS_ERR(file)) {
                err = PTR_ERR(file);
                goto put_fd;
        }

        file->f_mode |= FMODE_LSEEK | FMODE_PREAD;
        fd_install(fd, file);
        return fd;
put_fd:
        put_unused_fd(fd);
free_blob:
        vpd_blob_free(blob);
        return err;
}
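
/*
 * Expected user-space interaction, sketched (variable names are
 * illustrative; PAPR_VPD_IOC_CREATE_HANDLE and struct
 * papr_location_code come from <uapi/asm/papr-vpd.h>):
 *
 *	struct papr_location_code lc = { .str = "" };	// "" assumed to request all VPD, per PAPR
 *	int devfd = open("/dev/papr-vpd", O_RDONLY);
 *	int vpdfd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
 *
 *	// read(2)/pread(2)/lseek(2) vpdfd until EOF, then close(2) both fds.
 */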

/*
 * Top-level ioctl handler for /dev/papr-vpd.
 */
static long papr_vpd_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (__force void __user *)arg;
        long ret;

        switch (ioctl) {
        case PAPR_VPD_IOC_CREATE_HANDLE:
                ret = papr_vpd_create_handle(argp);
                break;
        default:
                ret = -ENOIOCTLCMD;
                break;
        }
        return ret;
}

static const struct file_operations papr_vpd_ops = {
        .unlocked_ioctl = papr_vpd_dev_ioctl,
};

static struct miscdevice papr_vpd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "papr-vpd",
        .fops = &papr_vpd_ops,
};

static __init int papr_vpd_init(void)
{
        if (!rtas_function_implemented(RTAS_FN_IBM_GET_VPD))
                return -ENODEV;

        return misc_register(&papr_vpd_dev);
}
machine_device_initcall(pseries, papr_vpd_init);
