TOMOYO Linux Cross Reference
Linux/arch/arm64/kvm/hyp/nvhe/ffa.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *      - Forwarded on unmodified to the SPMD at EL3
 *      - Rejected as "unsupported"
 *      - Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID     0

/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
        void    *buf;
        size_t  len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
        hyp_spinlock_t lock;
        void *tx;
        void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
static u32 hyp_ffa_version;
static bool has_version_negotiated;
static hyp_spinlock_t version_lock;

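/*
 * Helpers to convert an FF-A return code into the SMCCC register layout
 * expected by the host: FFA_ERROR carries the error code in w2, while a
 * successful response may carry a properties value in w2.
 */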
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
        *res = (struct arm_smccc_res) {
                .a0     = FFA_ERROR,
                .a2     = ffa_errno,
        };
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
        if (ret == FFA_RET_SUCCESS) {
                *res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
                                                .a2 = prop };
        } else {
                ffa_to_smccc_error(res, ret);
        }
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
        ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
                           struct arm_smccc_res *res)
{
        cpu_reg(ctxt, 0) = res->a0;
        cpu_reg(ctxt, 1) = res->a1;
        cpu_reg(ctxt, 2) = res->a2;
        cpu_reg(ctxt, 3) = res->a3;
}

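/*
 * An FF-A call is a fast SMCCC call owned by the standard secure service,
 * with a function number inside the [FFA_MIN_FUNC_NUM, FFA_MAX_FUNC_NUM]
 * window.
 */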
static bool is_ffa_call(u64 func_id)
{
        return ARM_SMCCC_IS_FAST_CALL(func_id) &&
               ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
               ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
               ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

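/*
 * Register the hypervisor's own TX/RX buffer pair with the SPMD at EL3.
 * Descriptors received from the host are staged in these buffers before
 * being forwarded onwards.
 */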
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
        struct arm_smccc_res res;

        arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
                          hyp_virt_to_phys(hyp_buffers.tx),
                          hyp_virt_to_phys(hyp_buffers.rx),
                          ffa_page_count,
                          0, 0, 0, 0,
                          &res);

        return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
        struct arm_smccc_res res;

        arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
                          HOST_FFA_ID,
                          0, 0, 0, 0, 0, 0,
                          &res);

        return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

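/* Thin wrappers around the FF-A ABI calls issued to the SPMD at EL3. */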
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
                             u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
        arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
                          handle_lo, handle_hi, fraglen, endpoint_id,
                          0, 0, 0,
                          res);
}

static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
                             u32 handle_hi, u32 fragoff)
{
        arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
                          handle_lo, handle_hi, fragoff, HOST_FFA_ID,
                          0, 0, 0,
                          res);
}

static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
                          u32 fraglen)
{
        arm_smccc_1_1_smc(func_id, len, fraglen,
                          0, 0, 0, 0, 0,
                          res);
}

static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
                             u32 handle_hi, u32 flags)
{
        arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
                          handle_lo, handle_hi, flags,
                          0, 0, 0, 0,
                          res);
}

static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
        arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
                          len, len,
                          0, 0, 0, 0, 0,
                          res);
}

static void ffa_rx_release(struct arm_smccc_res *res)
{
        arm_smccc_1_1_smc(FFA_RX_RELEASE,
                          0, 0,
                          0, 0, 0, 0, 0,
                          res);
}

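/*
 * Handle FFA_FN64_RXTX_MAP from the host: register the hypervisor's own
 * buffer pair with the SPMD, then share and pin the host's TX/RX pages
 * into the hypervisor address space so that their contents can be
 * inspected before anything is forwarded.
 */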
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
                            struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(phys_addr_t, tx, ctxt, 1);
        DECLARE_REG(phys_addr_t, rx, ctxt, 2);
        DECLARE_REG(u32, npages, ctxt, 3);
        int ret = 0;
        void *rx_virt, *tx_virt;

        if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        hyp_spin_lock(&host_buffers.lock);
        if (host_buffers.tx) {
                ret = FFA_RET_DENIED;
                goto out_unlock;
        }

        /*
         * Map our hypervisor buffers into the SPMD before mapping and
         * pinning the host buffers in our own address space.
         */
        ret = ffa_map_hyp_buffers(npages);
        if (ret)
                goto out_unlock;

        ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unmap;
        }

        ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unshare_tx;
        }

        tx_virt = hyp_phys_to_virt(tx);
        ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unshare_rx;
        }

        rx_virt = hyp_phys_to_virt(rx);
        ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
        if (ret) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto err_unpin_tx;
        }

        host_buffers.tx = tx_virt;
        host_buffers.rx = rx_virt;

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        ffa_to_smccc_res(res, ret);
        return;

err_unpin_tx:
        hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
        __pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
        __pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
        ffa_unmap_hyp_buffers();
        goto out_unlock;
}

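/*
 * Handle FFA_RXTX_UNMAP from the host: unpin and unshare the host's
 * buffers, then drop the hypervisor's own mapping with the SPMD.
 */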
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
                              struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, id, ctxt, 1);
        int ret = 0;

        if (id != HOST_FFA_ID) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.tx) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
        WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
        host_buffers.tx = NULL;

        hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
        WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
        host_buffers.rx = NULL;

        ffa_unmap_hyp_buffers();

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        ffa_to_smccc_res(res, ret);
}

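/*
 * Walk an array of address ranges, transitioning each page from host-owned
 * to shared-with-FF-A in the host stage-2 page-table. The number of ranges
 * successfully shared is returned so that the caller can undo a partial
 * failure.
 */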
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
                                   u32 nranges)
{
        u32 i;

        for (i = 0; i < nranges; ++i) {
                struct ffa_mem_region_addr_range *range = &ranges[i];
                u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
                u64 pfn = hyp_phys_to_pfn(range->address);

                if (!PAGE_ALIGNED(sz))
                        break;

                if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
                        break;
        }

        return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
                                     u32 nranges)
{
        u32 i;

        for (i = 0; i < nranges; ++i) {
                struct ffa_mem_region_addr_range *range = &ranges[i];
                u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
                u64 pfn = hyp_phys_to_pfn(range->address);

                if (!PAGE_ALIGNED(sz))
                        break;

                if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
                        break;
        }

        return i;
}

static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
                                 u32 nranges)
{
        u32 nshared = __ffa_host_share_ranges(ranges, nranges);
        int ret = 0;

        if (nshared != nranges) {
                WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
                ret = FFA_RET_DENIED;
        }

        return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
                                   u32 nranges)
{
        u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
        int ret = 0;

        if (nunshared != nranges) {
                WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
                ret = FFA_RET_DENIED;
        }

        return ret;
}

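/*
 * Handle FFA_MEM_FRAG_TX from the host: a further fragment of a memory
 * transaction descriptor, consisting solely of address ranges, which must
 * be shared in the host stage-2 before the fragment is forwarded (and
 * the whole transaction reclaimed if the sharing fails).
 */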
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
                               struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, handle_lo, ctxt, 1);
        DECLARE_REG(u32, handle_hi, ctxt, 2);
        DECLARE_REG(u32, fraglen, ctxt, 3);
        DECLARE_REG(u32, endpoint_id, ctxt, 4);
        struct ffa_mem_region_addr_range *buf;
        int ret = FFA_RET_INVALID_PARAMETERS;
        u32 nr_ranges;

        if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
                goto out;

        if (fraglen % sizeof(*buf))
                goto out;

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.tx)
                goto out_unlock;

        buf = hyp_buffers.tx;
        memcpy(buf, host_buffers.tx, fraglen);
        nr_ranges = fraglen / sizeof(*buf);

        ret = ffa_host_share_ranges(buf, nr_ranges);
        if (ret) {
                /*
                 * We're effectively aborting the transaction, so we need
                 * to restore the global state to what it was prior to
                 * transmission of the first fragment.
                 */
                ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
                WARN_ON(res->a0 != FFA_SUCCESS);
                goto out_unlock;
        }

        ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
        if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
                WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        if (ret)
                ffa_to_smccc_res(res, ret);

        /*
         * If for any reason this did not succeed, we're in trouble as we
         * have now lost the content of the previous fragments and we can't
         * roll back the host stage-2 changes. The pages previously marked
         * as shared will remain stuck in that state forever, preventing
         * the host from sharing/donating them again; this may lead to
         * subsequent failures, but will not compromise confidentiality.
         */
        return;
}

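/*
 * Common handler for FFA_FN64_MEM_SHARE and FFA_FN64_MEM_LEND: validate
 * the transaction descriptor staged from the host's TX buffer, mark its
 * constituent ranges as shared in the host stage-2 and only then forward
 * the call to EL3, unsharing again if the SPMD rejects the transfer.
 */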
static void __do_ffa_mem_xfer(const u64 func_id,
                              struct arm_smccc_res *res,
                              struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, len, ctxt, 1);
        DECLARE_REG(u32, fraglen, ctxt, 2);
        DECLARE_REG(u64, addr_mbz, ctxt, 3);
        DECLARE_REG(u32, npages_mbz, ctxt, 4);
        struct ffa_mem_region_attributes *ep_mem_access;
        struct ffa_composite_mem_region *reg;
        struct ffa_mem_region *buf;
        u32 offset, nr_ranges;
        int ret = 0;

        if (addr_mbz || npages_mbz || fraglen > len ||
            fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        if (fraglen < sizeof(struct ffa_mem_region) +
                      sizeof(struct ffa_mem_region_attributes)) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out;
        }

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.tx) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        if (len > ffa_desc_buf.len) {
                ret = FFA_RET_NO_MEMORY;
                goto out_unlock;
        }

        buf = hyp_buffers.tx;
        memcpy(buf, host_buffers.tx, fraglen);

        ep_mem_access = (void *)buf +
                        ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
        offset = ep_mem_access->composite_off;
        if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        reg = (void *)buf + offset;
        nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
        if (nr_ranges % sizeof(reg->constituents[0])) {
                ret = FFA_RET_INVALID_PARAMETERS;
                goto out_unlock;
        }

        nr_ranges /= sizeof(reg->constituents[0]);
        ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
        if (ret)
                goto out_unlock;

        ffa_mem_xfer(res, func_id, len, fraglen);
        if (fraglen != len) {
                if (res->a0 != FFA_MEM_FRAG_RX)
                        goto err_unshare;

                if (res->a3 != fraglen)
                        goto err_unshare;
        } else if (res->a0 != FFA_SUCCESS) {
                goto err_unshare;
        }

out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
out:
        if (ret)
                ffa_to_smccc_res(res, ret);
        return;

err_unshare:
        WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
        goto out_unlock;
}

/*
 * Note: no trailing semicolon after "while (0)", so that uses of the
 * macro behave as a single statement in if/else chains.
 */
#define do_ffa_mem_xfer(fid, res, ctxt)                         \
        do {                                                    \
                BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&     \
                             (fid) != FFA_FN64_MEM_LEND);       \
                __do_ffa_mem_xfer((fid), (res), (ctxt));        \
        } while (0)

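/*
 * Handle FFA_MEM_RECLAIM from the host: retrieve the full (possibly
 * fragmented) descriptor for the handle from the SPMD so that, once the
 * reclaim itself has succeeded, the constituent ranges can be unshared
 * in the host stage-2.
 */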
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
                               struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, handle_lo, ctxt, 1);
        DECLARE_REG(u32, handle_hi, ctxt, 2);
        DECLARE_REG(u32, flags, ctxt, 3);
        struct ffa_mem_region_attributes *ep_mem_access;
        struct ffa_composite_mem_region *reg;
        u32 offset, len, fraglen, fragoff;
        struct ffa_mem_region *buf;
        int ret = 0;
        u64 handle;

        handle = PACK_HANDLE(handle_lo, handle_hi);

        hyp_spin_lock(&host_buffers.lock);

        buf = hyp_buffers.tx;
        *buf = (struct ffa_mem_region) {
                .sender_id      = HOST_FFA_ID,
                .handle         = handle,
        };

        ffa_retrieve_req(res, sizeof(*buf));
        buf = hyp_buffers.rx;
        if (res->a0 != FFA_MEM_RETRIEVE_RESP)
                goto out_unlock;

        len = res->a1;
        fraglen = res->a2;

        ep_mem_access = (void *)buf +
                        ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
        offset = ep_mem_access->composite_off;
        /*
         * We can trust the SPMD to get this right, but let's at least
         * check that we end up with something that doesn't look _completely_
         * bogus.
         */
        if (WARN_ON(offset > len ||
                    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
                ret = FFA_RET_ABORTED;
                ffa_rx_release(res);
                goto out_unlock;
        }

        if (len > ffa_desc_buf.len) {
                ret = FFA_RET_NO_MEMORY;
                ffa_rx_release(res);
                goto out_unlock;
        }

        buf = ffa_desc_buf.buf;
        memcpy(buf, hyp_buffers.rx, fraglen);
        ffa_rx_release(res);

        for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
                ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
                if (res->a0 != FFA_MEM_FRAG_TX) {
                        ret = FFA_RET_INVALID_PARAMETERS;
                        goto out_unlock;
                }

                fraglen = res->a3;
                memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
                ffa_rx_release(res);
        }

        ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
        if (res->a0 != FFA_SUCCESS)
                goto out_unlock;

        reg = (void *)buf + offset;
        /* If the SPMD was happy, then we should be too. */
        WARN_ON(ffa_host_unshare_ranges(reg->constituents,
                                        reg->addr_range_cnt));
out_unlock:
        hyp_spin_unlock(&host_buffers.lock);

        if (ret)
                ffa_to_smccc_res(res, ret);
}

/*
 * Is a given FFA function supported, either by forwarding it on directly
 * or by handling it at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
        switch (func_id) {
        /* Unsupported memory management calls */
        case FFA_FN64_MEM_RETRIEVE_REQ:
        case FFA_MEM_RETRIEVE_RESP:
        case FFA_MEM_RELINQUISH:
        case FFA_MEM_OP_PAUSE:
        case FFA_MEM_OP_RESUME:
        case FFA_MEM_FRAG_RX:
        case FFA_FN64_MEM_DONATE:
        /* Indirect message passing via RX/TX buffers */
        case FFA_MSG_SEND:
        case FFA_MSG_POLL:
        case FFA_MSG_WAIT:
        /* 32-bit variants of 64-bit calls */
        case FFA_MSG_SEND_DIRECT_RESP:
        case FFA_RXTX_MAP:
        case FFA_MEM_DONATE:
        case FFA_MEM_RETRIEVE_REQ:
                return false;
        }

        return true;
}

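/*
 * Handle FFA_FEATURES for the calls the proxy intercepts itself; anything
 * else that is supported is forwarded to EL3 by the caller.
 */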
static bool do_ffa_features(struct arm_smccc_res *res,
                            struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, id, ctxt, 1);
        u64 prop = 0;
        int ret = 0;

        if (!ffa_call_supported(id)) {
                ret = FFA_RET_NOT_SUPPORTED;
                goto out_handled;
        }

        switch (id) {
        case FFA_MEM_SHARE:
        case FFA_FN64_MEM_SHARE:
        case FFA_MEM_LEND:
        case FFA_FN64_MEM_LEND:
                ret = FFA_RET_SUCCESS;
                prop = 0; /* No support for dynamic buffers */
                goto out_handled;
        default:
                return false;
        }

out_handled:
        ffa_to_smccc_res_prop(res, ret, prop);
        return true;
}

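/*
 * Sanity-check the FF-A implementation at EL3 once a version has been
 * agreed: we must have been assigned the expected endpoint ID, and the
 * minimum RXTX buffer size it mandates must fit within a page.
 */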
static int hyp_ffa_post_init(void)
{
        size_t min_rxtx_sz;
        struct arm_smccc_res res;

        arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
        if (res.a0 != FFA_SUCCESS)
                return -EOPNOTSUPP;

        if (res.a2 != HOST_FFA_ID)
                return -EINVAL;

        arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
                          0, 0, 0, 0, 0, 0, &res);
        if (res.a0 != FFA_SUCCESS)
                return -EOPNOTSUPP;

        switch (res.a2) {
        case FFA_FEAT_RXTX_MIN_SZ_4K:
                min_rxtx_sz = SZ_4K;
                break;
        case FFA_FEAT_RXTX_MIN_SZ_16K:
                min_rxtx_sz = SZ_16K;
                break;
        case FFA_FEAT_RXTX_MIN_SZ_64K:
                min_rxtx_sz = SZ_64K;
                break;
        default:
                return -EINVAL;
        }

        if (min_rxtx_sz > PAGE_SIZE)
                return -EOPNOTSUPP;

        return 0;
}

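/*
 * Negotiate the FF-A version with the host. A downgrade below the version
 * already agreed with EL3 is only accepted if the TEE supports it, and the
 * outcome is latched: once negotiated, FFA_VERSION simply reports the
 * version in use.
 */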
static void do_ffa_version(struct arm_smccc_res *res,
                           struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, ffa_req_version, ctxt, 1);

        if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
                res->a0 = FFA_RET_NOT_SUPPORTED;
                return;
        }

        hyp_spin_lock(&version_lock);
        if (has_version_negotiated) {
                res->a0 = hyp_ffa_version;
                goto unlock;
        }

        /*
         * If the client driver tries to downgrade the version, we first
         * need to ask whether the TEE supports it.
         */
        if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
                arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
                                  0, 0, 0, 0, 0,
                                  res);
                if (res->a0 == FFA_RET_NOT_SUPPORTED)
                        goto unlock;

                hyp_ffa_version = ffa_req_version;
        }

        if (hyp_ffa_post_init()) {
                res->a0 = FFA_RET_NOT_SUPPORTED;
        } else {
                has_version_negotiated = true;
                res->a0 = hyp_ffa_version;
        }
unlock:
        hyp_spin_unlock(&version_lock);
}

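/*
 * Handle FFA_PARTITION_INFO_GET from the host by forwarding the call to
 * EL3 and copying the returned partition descriptors from the hypervisor's
 * RX buffer into the host's, bounding the copy by the mailbox size.
 */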
static void do_ffa_part_get(struct arm_smccc_res *res,
                            struct kvm_cpu_context *ctxt)
{
        DECLARE_REG(u32, uuid0, ctxt, 1);
        DECLARE_REG(u32, uuid1, ctxt, 2);
        DECLARE_REG(u32, uuid2, ctxt, 3);
        DECLARE_REG(u32, uuid3, ctxt, 4);
        DECLARE_REG(u32, flags, ctxt, 5);
        u32 count, partition_sz, copy_sz;

        hyp_spin_lock(&host_buffers.lock);
        if (!host_buffers.rx) {
                ffa_to_smccc_res(res, FFA_RET_BUSY);
                goto out_unlock;
        }

        arm_smccc_1_1_smc(FFA_PARTITION_INFO_GET, uuid0, uuid1,
                          uuid2, uuid3, flags, 0, 0,
                          res);

        if (res->a0 != FFA_SUCCESS)
                goto out_unlock;

        count = res->a2;
        if (!count)
                goto out_unlock;

        if (hyp_ffa_version > FFA_VERSION_1_0) {
                /* Nothing to copy if the caller only asked for the count */
                if (flags & 0x1)
                        goto out_unlock;

                partition_sz = res->a3;
        } else {
                /* FFA_VERSION_1_0 lacks the size in the response */
                partition_sz = FFA_1_0_PARTITON_INFO_SZ;
        }

        copy_sz = partition_sz * count;
        if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
                ffa_to_smccc_res(res, FFA_RET_ABORTED);
                goto out_unlock;
        }

        memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
out_unlock:
        hyp_spin_unlock(&host_buffers.lock);
}

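/*
 * Entry point from the host SMC trap path. Returns true if the call was
 * handled here, with the return value already written back to the host
 * context, and false if the SMC should be forwarded to EL3 unmodified.
 */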
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
        struct arm_smccc_res res;

        /*
         * There's no way we can tell what a non-standard SMC call might
         * be up to. Ideally, we would terminate these here and return
         * an error to the host, but sadly devices make use of custom
         * firmware calls for things like power management, debugging,
         * RNG access and crash reporting.
         *
         * Given that the architecture requires us to trust EL3 anyway,
         * we forward unrecognised calls on under the assumption that
         * the firmware doesn't expose a mechanism to access arbitrary
         * non-secure memory. Short of a per-device table of SMCs, this
         * is the best we can do.
         */
        if (!is_ffa_call(func_id))
                return false;

        if (!has_version_negotiated && func_id != FFA_VERSION) {
                ffa_to_smccc_error(&res, FFA_RET_INVALID_PARAMETERS);
                goto out_handled;
        }

        switch (func_id) {
        case FFA_FEATURES:
                if (!do_ffa_features(&res, host_ctxt))
                        return false;
                goto out_handled;
        /* Memory management */
        case FFA_FN64_RXTX_MAP:
                do_ffa_rxtx_map(&res, host_ctxt);
                goto out_handled;
        case FFA_RXTX_UNMAP:
                do_ffa_rxtx_unmap(&res, host_ctxt);
                goto out_handled;
        case FFA_MEM_SHARE:
        case FFA_FN64_MEM_SHARE:
                do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
                goto out_handled;
        case FFA_MEM_RECLAIM:
                do_ffa_mem_reclaim(&res, host_ctxt);
                goto out_handled;
        case FFA_MEM_LEND:
        case FFA_FN64_MEM_LEND:
                do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
                goto out_handled;
        case FFA_MEM_FRAG_TX:
                do_ffa_mem_frag_tx(&res, host_ctxt);
                goto out_handled;
        case FFA_VERSION:
                do_ffa_version(&res, host_ctxt);
                goto out_handled;
        case FFA_PARTITION_INFO_GET:
                do_ffa_part_get(&res, host_ctxt);
                goto out_handled;
        }

        if (ffa_call_supported(func_id))
                return false; /* Pass through */

        ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
        ffa_set_retval(host_ctxt, &res);
        return true;
}

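/*
 * One-time initialisation at EL2. Probe for an FF-A implementation at EL3,
 * negotiate a version, and carve up the pages donated by the host:
 *
 *   pages: [ hyp TX mailbox | hyp RX mailbox | descriptor buffer ... ]
 *            KVM_FFA_MBOX_NR_PAGES each       the remainder
 */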
int hyp_ffa_init(void *pages)
{
        struct arm_smccc_res res;
        void *tx, *rx;

        if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
                return 0;

        arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_1, 0, 0, 0, 0, 0, 0, &res);
        if (res.a0 == FFA_RET_NOT_SUPPORTED)
                return 0;

        /*
         * Firmware returns the maximum supported version of the FF-A
         * implementation. Check that the returned version is
         * backwards-compatible with the hyp according to the rules in DEN0077A
         * v1.1 REL0 13.2.1.
         *
         * Of course, things are never simple when dealing with firmware. v1.1
         * broke ABI with v1.0 on several structures, which is itself
         * incompatible with the aforementioned versioning scheme. The
         * expectation is that v1.x implementations that do not support the v1.0
         * ABI return NOT_SUPPORTED rather than a version number, according to
         * DEN0077A v1.1 REL0 18.6.4.
         */
        if (FFA_MAJOR_VERSION(res.a0) != 1)
                return -EOPNOTSUPP;

        if (FFA_MINOR_VERSION(res.a0) < FFA_MINOR_VERSION(FFA_VERSION_1_1))
                hyp_ffa_version = res.a0;
        else
                hyp_ffa_version = FFA_VERSION_1_1;

        tx = pages;
        pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
        rx = pages;
        pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

        ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
                .buf    = pages,
                .len    = PAGE_SIZE *
                          (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
        };

        hyp_buffers = (struct kvm_ffa_buffers) {
                .lock   = __HYP_SPIN_LOCK_UNLOCKED,
                .tx     = tx,
                .rx     = rx,
        };

        host_buffers = (struct kvm_ffa_buffers) {
                .lock   = __HYP_SPIN_LOCK_UNLOCKED,
        };

        version_lock = __HYP_SPIN_LOCK_UNLOCKED;
        return 0;
}
