TOMOYO Linux Cross Reference
Linux/arch/arm64/kvm/vgic/vgic-its.c


  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * GICv3 ITS emulation
  4  *
  5  * Copyright (C) 2015,2016 ARM Ltd.
  6  * Author: Andre Przywara <andre.przywara@arm.com>
  7  */
  8 
  9 #include <linux/cpu.h>
 10 #include <linux/kvm.h>
 11 #include <linux/kvm_host.h>
 12 #include <linux/interrupt.h>
 13 #include <linux/list.h>
 14 #include <linux/uaccess.h>
 15 #include <linux/list_sort.h>
 16 
 17 #include <linux/irqchip/arm-gic-v3.h>
 18 
 19 #include <asm/kvm_emulate.h>
 20 #include <asm/kvm_arm.h>
 21 #include <asm/kvm_mmu.h>
 22 
 23 #include "vgic.h"
 24 #include "vgic-mmio.h"
 25 
 26 static struct kvm_device_ops kvm_arm_vgic_its_ops;
 27 
 28 static int vgic_its_save_tables_v0(struct vgic_its *its);
 29 static int vgic_its_restore_tables_v0(struct vgic_its *its);
 30 static int vgic_its_commit_v0(struct vgic_its *its);
 31 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 32                              struct kvm_vcpu *filter_vcpu, bool needs_inv);
 33 
 34 /*
 35  * Creates a new (reference to a) struct vgic_irq for a given LPI.
 36  * If this LPI is already mapped on another ITS, we increase its refcount
 37  * and return a pointer to the existing structure.
 38  * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 39  * This function returns a pointer to the _unlocked_ structure.
 40  */
 41 static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 42                                      struct kvm_vcpu *vcpu)
 43 {
 44         struct vgic_dist *dist = &kvm->arch.vgic;
 45         struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
 46         unsigned long flags;
 47         int ret;
 48 
 49         /* In this case there is no put, since we keep the reference. */
 50         if (irq)
 51                 return irq;
 52 
 53         irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
 54         if (!irq)
 55                 return ERR_PTR(-ENOMEM);
 56 
 57         ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
 58         if (ret) {
 59                 kfree(irq);
 60                 return ERR_PTR(ret);
 61         }
 62 
 63         INIT_LIST_HEAD(&irq->ap_list);
 64         raw_spin_lock_init(&irq->irq_lock);
 65 
 66         irq->config = VGIC_CONFIG_EDGE;
 67         kref_init(&irq->refcount);
 68         irq->intid = intid;
 69         irq->target_vcpu = vcpu;
 70         irq->group = 1;
 71 
 72         xa_lock_irqsave(&dist->lpi_xa, flags);
 73 
 74         /*
 75          * There could be a race with another vgic_add_lpi(), so we need to
 76          * check that we don't add a second list entry with the same LPI.
 77          */
 78         oldirq = xa_load(&dist->lpi_xa, intid);
 79         if (vgic_try_get_irq_kref(oldirq)) {
 80                 /* Someone was faster with adding this LPI, let's use that. */
 81                 kfree(irq);
 82                 irq = oldirq;
 83 
 84                 goto out_unlock;
 85         }
 86 
 87         ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
 88         if (ret) {
 89                 xa_release(&dist->lpi_xa, intid);
 90                 kfree(irq);
 91         }
 92 
 93 out_unlock:
 94         xa_unlock_irqrestore(&dist->lpi_xa, flags);
 95 
 96         if (ret)
 97                 return ERR_PTR(ret);
 98 
 99         /*
100          * We "cache" the configuration table entries in our struct vgic_irq's.
101          * However we only have those structs for mapped IRQs, so we read in
102          * the respective config data from memory here upon mapping the LPI.
103          *
104          * Should any of these fail, behave as if we couldn't create the LPI
105          * by dropping the refcount and returning the error.
106          */
107         ret = update_lpi_config(kvm, irq, NULL, false);
108         if (ret) {
109                 vgic_put_irq(kvm, irq);
110                 return ERR_PTR(ret);
111         }
112 
113         ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
114         if (ret) {
115                 vgic_put_irq(kvm, irq);
116                 return ERR_PTR(ret);
117         }
118 
119         return irq;
120 }
121 
122 struct its_device {
123         struct list_head dev_list;
124 
125         /* the head for the list of ITTEs */
126         struct list_head itt_head;
127         u32 num_eventid_bits;
128         gpa_t itt_addr;
129         u32 device_id;
130 };
131 
132 #define COLLECTION_NOT_MAPPED ((u32)~0)
133 
134 struct its_collection {
135         struct list_head coll_list;
136 
137         u32 collection_id;
138         u32 target_addr;
139 };
140 
141 #define its_is_collection_mapped(coll) ((coll) && \
142                                 ((coll)->target_addr != COLLECTION_NOT_MAPPED))
143 
144 struct its_ite {
145         struct list_head ite_list;
146 
147         struct vgic_irq *irq;
148         struct its_collection *collection;
149         u32 event_id;
150 };
151 
152 /**
153  * struct vgic_its_abi - ITS abi ops and settings
154  * @cte_esz: collection table entry size
155  * @dte_esz: device table entry size
156  * @ite_esz: interrupt translation table entry size
157  * @save_tables: save the ITS tables into guest RAM
158  * @restore_tables: restore the ITS internal structs from tables
159  *  stored in guest RAM
160  * @commit: initialize the registers which expose the ABI settings,
161  *  especially the entry sizes
162  */
163 struct vgic_its_abi {
164         int cte_esz;
165         int dte_esz;
166         int ite_esz;
167         int (*save_tables)(struct vgic_its *its);
168         int (*restore_tables)(struct vgic_its *its);
169         int (*commit)(struct vgic_its *its);
170 };
171 
172 #define ABI_0_ESZ       8
173 #define ESZ_MAX         ABI_0_ESZ
174 
175 static const struct vgic_its_abi its_table_abi_versions[] = {
176         [0] = {
177          .cte_esz = ABI_0_ESZ,
178          .dte_esz = ABI_0_ESZ,
179          .ite_esz = ABI_0_ESZ,
180          .save_tables = vgic_its_save_tables_v0,
181          .restore_tables = vgic_its_restore_tables_v0,
182          .commit = vgic_its_commit_v0,
183         },
184 };
185 
186 #define NR_ITS_ABIS     ARRAY_SIZE(its_table_abi_versions)
187 
188 inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
189 {
190         return &its_table_abi_versions[its->abi_rev];
191 }
192 
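    /*
     * Select the ABI revision for this ITS and run the ABI's commit
     * callback, which programs the registers that expose the ABI settings
     * (in particular the table entry sizes) to the guest.
     */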
193 static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
194 {
195         const struct vgic_its_abi *abi;
196 
197         its->abi_rev = rev;
198         abi = vgic_its_get_abi(its);
199         return abi->commit(its);
200 }
201 
202 /*
203  * Finds and returns a device in the device table for an ITS.
204  * Must be called with the its_lock mutex held.
205  */
206 static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
207 {
208         struct its_device *device;
209 
210         list_for_each_entry(device, &its->device_list, dev_list)
211                 if (device_id == device->device_id)
212                         return device;
213 
214         return NULL;
215 }
216 
217 /*
218  * Finds and returns an interrupt translation table entry (ITTE) for a given
219  * Device ID/Event ID pair on an ITS.
220  * Must be called with the its_lock mutex held.
221  */
222 static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
223                                   u32 event_id)
224 {
225         struct its_device *device;
226         struct its_ite *ite;
227 
228         device = find_its_device(its, device_id);
229         if (device == NULL)
230                 return NULL;
231 
232         list_for_each_entry(ite, &device->itt_head, ite_list)
233                 if (ite->event_id == event_id)
234                         return ite;
235 
236         return NULL;
237 }
238 
239 /* To be usable as an iterator, this macro omits the enclosing parentheses */
240 #define for_each_lpi_its(dev, ite, its) \
241         list_for_each_entry(dev, &(its)->device_list, dev_list) \
242                 list_for_each_entry(ite, &(dev)->itt_head, ite_list)
243 
244 #define GIC_LPI_OFFSET 8192
245 
246 #define VITS_TYPER_IDBITS               16
247 #define VITS_MAX_EVENTID                (BIT(VITS_TYPER_IDBITS) - 1)
248 #define VITS_TYPER_DEVBITS              16
249 #define VITS_MAX_DEVID                  (BIT(VITS_TYPER_DEVBITS) - 1)
250 #define VITS_DTE_MAX_DEVID_OFFSET       (BIT(14) - 1)
251 #define VITS_ITE_MAX_EVENTID_OFFSET     (BIT(16) - 1)
252 
253 /*
254  * Finds and returns a collection in the ITS collection table.
255  * Must be called with the its_lock mutex held.
256  */
257 static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
258 {
259         struct its_collection *collection;
260 
261         list_for_each_entry(collection, &its->collection_list, coll_list) {
262                 if (coll_id == collection->collection_id)
263                         return collection;
264         }
265 
266         return NULL;
267 }
268 
269 #define LPI_PROP_ENABLE_BIT(p)  ((p) & LPI_PROP_ENABLED)
270 #define LPI_PROP_PRIORITY(p)    ((p) & 0xfc)
271 
272 /*
273  * Reads the configuration data for a given LPI from guest memory and
274  * updates the fields in struct vgic_irq.
275  * If filter_vcpu is not NULL, the update is applied only if the IRQ is
276  * targeting that VCPU; if filter_vcpu is NULL, it is applied unconditionally.
277  */
278 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
279                              struct kvm_vcpu *filter_vcpu, bool needs_inv)
280 {
281         u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
282         u8 prop;
283         int ret;
284         unsigned long flags;
285 
286         ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
287                                   &prop, 1);
288 
289         if (ret)
290                 return ret;
291 
292         raw_spin_lock_irqsave(&irq->irq_lock, flags);
293 
294         if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
295                 irq->priority = LPI_PROP_PRIORITY(prop);
296                 irq->enabled = LPI_PROP_ENABLE_BIT(prop);
297 
298                 if (!irq->hw) {
299                         vgic_queue_irq_unlock(kvm, irq, flags);
300                         return 0;
301                 }
302         }
303 
304         raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
305 
306         if (irq->hw)
307                 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
308 
309         return 0;
310 }
311 
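    /*
     * Change the target VCPU of an LPI. For a forwarded (hw) interrupt,
     * also move the VLPI mapping over to the new vcpu's vPE and adjust
     * the per-vPE VLPI counts.
     */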
312 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
313 {
314         int ret = 0;
315         unsigned long flags;
316 
317         raw_spin_lock_irqsave(&irq->irq_lock, flags);
318         irq->target_vcpu = vcpu;
319         raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
320 
321         if (irq->hw) {
322                 struct its_vlpi_map map;
323 
324                 ret = its_get_vlpi(irq->host_irq, &map);
325                 if (ret)
326                         return ret;
327 
328                 if (map.vpe)
329                         atomic_dec(&map.vpe->vlpi_count);
330                 map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
331                 atomic_inc(&map.vpe->vlpi_count);
332 
333                 ret = its_map_vlpi(irq->host_irq, &map);
334         }
335 
336         return ret;
337 }
338 
339 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
340                                            struct its_collection *col)
341 {
342         return kvm_get_vcpu_by_id(kvm, col->target_addr);
343 }
344 
345 /*
346  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
347  * is targeting) to the VGIC's view, which deals with target VCPUs.
348  * Needs to be called whenever either the collection for an LPI has
349  * changed or the collection itself got retargeted.
350  */
351 static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
352 {
353         struct kvm_vcpu *vcpu;
354 
355         if (!its_is_collection_mapped(ite->collection))
356                 return;
357 
358         vcpu = collection_to_vcpu(kvm, ite->collection);
359         update_affinity(ite->irq, vcpu);
360 }
361 
362 /*
363  * Updates the target VCPU for every LPI targeting this collection.
364  * Must be called with the its_lock mutex held.
365  */
366 static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
367                                        struct its_collection *coll)
368 {
369         struct its_device *device;
370         struct its_ite *ite;
371 
372         for_each_lpi_its(device, ite, its) {
373                 if (ite->collection != coll)
374                         continue;
375 
376                 update_affinity_ite(kvm, ite);
377         }
378 }
379 
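    /*
     * GICR_PROPBASER.IDbits encodes (number of interrupt ID bits - 1).
     * Return the resulting number of interrupt IDs, capped at what this
     * emulation supports (INTERRUPT_ID_BITS_ITS).
     */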
380 static u32 max_lpis_propbaser(u64 propbaser)
381 {
382         int nr_idbits = (propbaser & 0x1f) + 1;
383 
384         return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
385 }
386 
387 /*
388  * Sync the pending table pending bit of LPIs targeting @vcpu
389  * with our own data structures. This relies on the LPI having been
390  * mapped beforehand.
391  */
392 static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
393 {
394         gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
395         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
396         unsigned long intid, flags;
397         struct vgic_irq *irq;
398         int last_byte_offset = -1;
399         int ret = 0;
400         u8 pendmask;
401 
402         xa_for_each(&dist->lpi_xa, intid, irq) {
403                 int byte_offset, bit_nr;
404 
405                 byte_offset = intid / BITS_PER_BYTE;
406                 bit_nr = intid % BITS_PER_BYTE;
407 
408                 /*
409                  * For contiguously allocated LPIs chances are we just read
410                  * this very same byte in the last iteration. Reuse that.
411                  */
412                 if (byte_offset != last_byte_offset) {
413                         ret = kvm_read_guest_lock(vcpu->kvm,
414                                                   pendbase + byte_offset,
415                                                   &pendmask, 1);
416                         if (ret)
417                                 return ret;
418 
419                         last_byte_offset = byte_offset;
420                 }
421 
422                 irq = vgic_get_irq(vcpu->kvm, NULL, intid);
423                 if (!irq)
424                         continue;
425 
426                 raw_spin_lock_irqsave(&irq->irq_lock, flags);
427                 if (irq->target_vcpu == vcpu)
428                         irq->pending_latch = pendmask & (1U << bit_nr);
429                 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
430                 vgic_put_irq(vcpu->kvm, irq);
431         }
432 
433         return ret;
434 }
435 
436 static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
437                                               struct vgic_its *its,
438                                               gpa_t addr, unsigned int len)
439 {
440         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
441         u64 reg = GITS_TYPER_PLPIS;
442 
443         /*
444          * We use linear CPU numbers for redistributor addressing,
445          * so GITS_TYPER.PTA is 0.
446          * Also we force all PROPBASER registers to be the same, so
447          * CommonLPIAff is 0 as well.
448          * To avoid memory waste in the guest, we keep the number of IDBits and
449  * DevBits low - at least for the time being.
450          */
451         reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
452         reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
453         reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;
454 
455         return extract_bytes(reg, addr & 7, len);
456 }
457 
458 static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
459                                              struct vgic_its *its,
460                                              gpa_t addr, unsigned int len)
461 {
462         u32 val;
463 
464         val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
465         val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
466         return val;
467 }
468 
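    /*
     * Userspace write to GITS_IIDR: only the ABI revision field is
     * honoured, and it must name an ABI this implementation supports.
     */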
469 static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
470                                             struct vgic_its *its,
471                                             gpa_t addr, unsigned int len,
472                                             unsigned long val)
473 {
474         u32 rev = GITS_IIDR_REV(val);
475 
476         if (rev >= NR_ITS_ABIS)
477                 return -EINVAL;
478         return vgic_its_set_abi(its, rev);
479 }
480 
481 static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
482                                                struct vgic_its *its,
483                                                gpa_t addr, unsigned int len)
484 {
485         switch (addr & 0xffff) {
486         case GITS_PIDR0:
487                 return 0x92;    /* part number, bits[7:0] */
488         case GITS_PIDR1:
489                 return 0xb4;    /* part number, bits[11:8] */
490         case GITS_PIDR2:
491                 return GIC_PIDR2_ARCH_GICv3 | 0x0b;
492         case GITS_PIDR4:
493                 return 0x40;    /* This is a 64K software visible page */
494         /* The following are the ID registers for (any) GIC. */
495         case GITS_CIDR0:
496                 return 0x0d;
497         case GITS_CIDR1:
498                 return 0xf0;
499         case GITS_CIDR2:
500                 return 0x05;
501         case GITS_CIDR3:
502                 return 0xb1;
503         }
504 
505         return 0;
506 }
507 
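    /*
     * Resolve a doorbell (GITS_TRANSLATER) address to the vgic_its instance
     * registered on the KVM MMIO bus, or return an ERR_PTR if the address
     * is not backed by an ITS device.
     */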
508 static struct vgic_its *__vgic_doorbell_to_its(struct kvm *kvm, gpa_t db)
509 {
510         struct kvm_io_device *kvm_io_dev;
511         struct vgic_io_device *iodev;
512 
513         kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, db);
514         if (!kvm_io_dev)
515                 return ERR_PTR(-EINVAL);
516 
517         if (kvm_io_dev->ops != &kvm_io_gic_ops)
518                 return ERR_PTR(-EINVAL);
519 
520         iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
521         if (iodev->iodev_type != IODEV_ITS)
522                 return ERR_PTR(-EINVAL);
523 
524         return iodev->its;
525 }
526 
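    /*
     * Pack a device ID/event ID pair into a single index for the
     * translation cache xarray; the event ID occupies the low
     * VITS_TYPER_IDBITS bits.
     */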
527 static unsigned long vgic_its_cache_key(u32 devid, u32 eventid)
528 {
529         return (((unsigned long)devid) << VITS_TYPER_IDBITS) | eventid;
530 
531 }
532 
533 static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
534                                              u32 devid, u32 eventid)
535 {
536         unsigned long cache_key = vgic_its_cache_key(devid, eventid);
537         struct vgic_its *its;
538         struct vgic_irq *irq;
539 
540         if (devid > VITS_MAX_DEVID || eventid > VITS_MAX_EVENTID)
541                 return NULL;
542 
543         its = __vgic_doorbell_to_its(kvm, db);
544         if (IS_ERR(its))
545                 return NULL;
546 
547         rcu_read_lock();
548 
549         irq = xa_load(&its->translation_cache, cache_key);
550         if (!vgic_try_get_irq_kref(irq))
551                 irq = NULL;
552 
553         rcu_read_unlock();
554 
555         return irq;
556 }
557 
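    /*
     * Insert a devid/eventid -> irq translation into this ITS's cache,
     * taking an additional reference on the IRQ. Requires the its_lock
     * mutex to be held.
     */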
558 static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
559                                        u32 devid, u32 eventid,
560                                        struct vgic_irq *irq)
561 {
562         unsigned long cache_key = vgic_its_cache_key(devid, eventid);
563         struct vgic_irq *old;
564 
565         /* Do not cache a directly injected interrupt */
566         if (irq->hw)
567                 return;
568 
569         /*
570          * The irq refcount is guaranteed to be nonzero while holding the
571          * its_lock, as the ITE (and the reference it holds) cannot be freed.
572          */
573         lockdep_assert_held(&its->its_lock);
574         vgic_get_irq_kref(irq);
575 
576         /*
577          * We could have raced with another CPU caching the same
578          * translation behind our back, ensure we don't leak a
579          * reference if that is the case.
580          */
581         old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
582         if (old)
583                 vgic_put_irq(kvm, old);
584 }
585 
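    /*
     * Drop every entry from this ITS's translation cache, releasing the
     * references taken when the entries were inserted.
     */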
586 static void vgic_its_invalidate_cache(struct vgic_its *its)
587 {
588         struct kvm *kvm = its->dev->kvm;
589         struct vgic_irq *irq;
590         unsigned long idx;
591 
592         xa_for_each(&its->translation_cache, idx, irq) {
593                 xa_erase(&its->translation_cache, idx);
594                 vgic_put_irq(kvm, irq);
595         }
596 }
597 
598 void vgic_its_invalidate_all_caches(struct kvm *kvm)
599 {
600         struct kvm_device *dev;
601         struct vgic_its *its;
602 
603         rcu_read_lock();
604 
605         list_for_each_entry_rcu(dev, &kvm->devices, vm_node) {
606                 if (dev->ops != &kvm_arm_vgic_its_ops)
607                         continue;
608 
609                 its = dev->private;
610                 vgic_its_invalidate_cache(its);
611         }
612 
613         rcu_read_unlock();
614 }
615 
616 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
617                          u32 devid, u32 eventid, struct vgic_irq **irq)
618 {
619         struct kvm_vcpu *vcpu;
620         struct its_ite *ite;
621 
622         if (!its->enabled)
623                 return -EBUSY;
624 
625         ite = find_ite(its, devid, eventid);
626         if (!ite || !its_is_collection_mapped(ite->collection))
627                 return E_ITS_INT_UNMAPPED_INTERRUPT;
628 
629         vcpu = collection_to_vcpu(kvm, ite->collection);
630         if (!vcpu)
631                 return E_ITS_INT_UNMAPPED_INTERRUPT;
632 
633         if (!vgic_lpis_enabled(vcpu))
634                 return -EBUSY;
635 
636         vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
637 
638         *irq = ite->irq;
639         return 0;
640 }
641 
642 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
643 {
644         u64 address;
645 
646         if (!vgic_has_its(kvm))
647                 return ERR_PTR(-ENODEV);
648 
649         if (!(msi->flags & KVM_MSI_VALID_DEVID))
650                 return ERR_PTR(-EINVAL);
651 
652         address = (u64)msi->address_hi << 32 | msi->address_lo;
653 
654         return __vgic_doorbell_to_its(kvm, address);
655 }
656 
657 /*
658  * Find the target VCPU and the LPI number for a given devid/eventid pair
659  * and make this IRQ pending, possibly injecting it.
660  * Must be called with the its_lock mutex held.
661  * Returns 0 on success, a positive error value for any ITS mapping
662  * related errors and negative error values for generic errors.
663  */
664 static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
665                                 u32 devid, u32 eventid)
666 {
667         struct vgic_irq *irq = NULL;
668         unsigned long flags;
669         int err;
670 
671         err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
672         if (err)
673                 return err;
674 
675         if (irq->hw)
676                 return irq_set_irqchip_state(irq->host_irq,
677                                              IRQCHIP_STATE_PENDING, true);
678 
679         raw_spin_lock_irqsave(&irq->irq_lock, flags);
680         irq->pending_latch = true;
681         vgic_queue_irq_unlock(kvm, irq, flags);
682 
683         return 0;
684 }
685 
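    /*
     * MSI injection fast path: look the doorbell/devid/eventid up in the
     * translation cache and pend the LPI directly. Returns -EWOULDBLOCK
     * on a cache miss so the caller can fall back to the full translation.
     */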
686 int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
687 {
688         struct vgic_irq *irq;
689         unsigned long flags;
690         phys_addr_t db;
691 
692         db = (u64)msi->address_hi << 32 | msi->address_lo;
693         irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
694         if (!irq)
695                 return -EWOULDBLOCK;
696 
697         raw_spin_lock_irqsave(&irq->irq_lock, flags);
698         irq->pending_latch = true;
699         vgic_queue_irq_unlock(kvm, irq, flags);
700         vgic_put_irq(kvm, irq);
701 
702         return 0;
703 }
704 
705 /*
706  * Queries the KVM IO bus framework to get the ITS pointer from the given
707  * doorbell address.
708  * We then call vgic_its_trigger_msi() with the decoded data.
709  * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
710  */
711 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
712 {
713         struct vgic_its *its;
714         int ret;
715 
716         if (!vgic_its_inject_cached_translation(kvm, msi))
717                 return 1;
718 
719         its = vgic_msi_to_its(kvm, msi);
720         if (IS_ERR(its))
721                 return PTR_ERR(its);
722 
723         mutex_lock(&its->its_lock);
724         ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
725         mutex_unlock(&its->its_lock);
726 
727         if (ret < 0)
728                 return ret;
729 
730         /*
731          * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
732          * if the guest has blocked the MSI. So we map any LPI mapping
733          * related error to that.
734          */
735         if (ret)
736                 return 0;
737         else
738                 return 1;
739 }
740 
741 /* Requires the its_lock to be held. */
742 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
743 {
744         list_del(&ite->ite_list);
745 
746         /* This put matches the get in vgic_add_lpi. */
747         if (ite->irq) {
748                 if (ite->irq->hw)
749                         WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
750 
751                 vgic_put_irq(kvm, ite->irq);
752         }
753 
754         kfree(ite);
755 }
756 
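    /*
     * Extract a bit field from an ITS command: 'word' selects one of the
     * four 64-bit command words, 'shift' and 'size' describe the field
     * within that word.
     */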
757 static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
758 {
759         return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
760 }
761 
762 #define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0,  0,  8)
763 #define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
764 #define its_cmd_get_size(cmd)           (its_cmd_mask_field(cmd, 1,  0,  5) + 1)
765 #define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1,  0, 32)
766 #define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
767 #define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2,  0, 16)
768 #define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2,  8, 44) << 8)
769 #define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
770 #define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63,  1)
771 
772 /*
773  * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
774  * Must be called with the its_lock mutex held.
775  */
776 static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
777                                        u64 *its_cmd)
778 {
779         u32 device_id = its_cmd_get_deviceid(its_cmd);
780         u32 event_id = its_cmd_get_id(its_cmd);
781         struct its_ite *ite;
782 
783         ite = find_ite(its, device_id, event_id);
784         if (ite && its_is_collection_mapped(ite->collection)) {
785                 /*
786                  * Though the spec talks about removing the pending state, we
787                  * don't bother here since we clear the ITTE anyway and the
788                  * pending state is a property of the ITTE struct.
789                  */
790                 vgic_its_invalidate_cache(its);
791 
792                 its_free_ite(kvm, ite);
793                 return 0;
794         }
795 
796         return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
797 }
798 
799 /*
800  * The MOVI command moves an ITTE to a different collection.
801  * Must be called with the its_lock mutex held.
802  */
803 static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
804                                     u64 *its_cmd)
805 {
806         u32 device_id = its_cmd_get_deviceid(its_cmd);
807         u32 event_id = its_cmd_get_id(its_cmd);
808         u32 coll_id = its_cmd_get_collection(its_cmd);
809         struct kvm_vcpu *vcpu;
810         struct its_ite *ite;
811         struct its_collection *collection;
812 
813         ite = find_ite(its, device_id, event_id);
814         if (!ite)
815                 return E_ITS_MOVI_UNMAPPED_INTERRUPT;
816 
817         if (!its_is_collection_mapped(ite->collection))
818                 return E_ITS_MOVI_UNMAPPED_COLLECTION;
819 
820         collection = find_collection(its, coll_id);
821         if (!its_is_collection_mapped(collection))
822                 return E_ITS_MOVI_UNMAPPED_COLLECTION;
823 
824         ite->collection = collection;
825         vcpu = collection_to_vcpu(kvm, collection);
826 
827         vgic_its_invalidate_cache(its);
828 
829         return update_affinity(ite->irq, vcpu);
830 }
831 
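    /*
     * Check whether a guest physical address is covered by a memslot,
     * taking the SRCU read lock around the memslot lookup.
     */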
832 static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa)
833 {
834         gfn_t gfn = gpa >> PAGE_SHIFT;
835         int idx;
836         bool ret;
837 
838         idx = srcu_read_lock(&its->dev->kvm->srcu);
839         ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
840         srcu_read_unlock(&its->dev->kvm->srcu, idx);
841         return ret;
842 }
843 
844 /*
845  * Check whether an ID can be stored into the corresponding guest table.
846  * For a direct table this is pretty easy, but gets a bit nasty for
847  * indirect tables. We check whether the resulting guest physical address
848  * is actually valid (covered by a memslot and guest accessible).
849  * For this we have to read the respective first level entry.
850  */
851 static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
852                               gpa_t *eaddr)
853 {
854         int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
855         u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
856         phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
857         int esz = GITS_BASER_ENTRY_SIZE(baser);
858         int index;
859 
860         switch (type) {
861         case GITS_BASER_TYPE_DEVICE:
862                 if (id > VITS_MAX_DEVID)
863                         return false;
864                 break;
865         case GITS_BASER_TYPE_COLLECTION:
866                 /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
867                 if (id >= BIT_ULL(16))
868                         return false;
869                 break;
870         default:
871                 return false;
872         }
873 
874         if (!(baser & GITS_BASER_INDIRECT)) {
875                 phys_addr_t addr;
876 
877                 if (id >= (l1_tbl_size / esz))
878                         return false;
879 
880                 addr = base + id * esz;
881 
882                 if (eaddr)
883                         *eaddr = addr;
884 
885                 return __is_visible_gfn_locked(its, addr);
886         }
887 
888         /* calculate and check the index into the 1st level */
889         index = id / (SZ_64K / esz);
890         if (index >= (l1_tbl_size / sizeof(u64)))
891                 return false;
892 
893         /* Each 1st level entry is represented by a 64-bit value. */
894         if (kvm_read_guest_lock(its->dev->kvm,
895                            base + index * sizeof(indirect_ptr),
896                            &indirect_ptr, sizeof(indirect_ptr)))
897                 return false;
898 
899         indirect_ptr = le64_to_cpu(indirect_ptr);
900 
901         /* check the valid bit of the first level entry */
902         if (!(indirect_ptr & BIT_ULL(63)))
903                 return false;
904 
905         /* Mask the guest physical address and calculate the frame number. */
906         indirect_ptr &= GENMASK_ULL(51, 16);
907 
908         /* Find the address of the actual entry */
909         index = id % (SZ_64K / esz);
910         indirect_ptr += index * esz;
911 
912         if (eaddr)
913                 *eaddr = indirect_ptr;
914 
915         return __is_visible_gfn_locked(its, indirect_ptr);
916 }
917 
918 /*
919  * Check whether an event ID can be stored in the corresponding Interrupt
920  * Translation Table, which starts at device->itt_addr.
921  */
922 static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device,
923                 u32 event_id)
924 {
925         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
926         int ite_esz = abi->ite_esz;
927         gpa_t gpa;
928 
929         /* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */
930         if (event_id >= BIT_ULL(device->num_eventid_bits))
931                 return false;
932 
933         gpa = device->itt_addr + event_id * ite_esz;
934         return __is_visible_gfn_locked(its, gpa);
935 }
936 
937 /*
938  * Add a new collection into the ITS collection table.
939  * Returns 0 on success, and a negative error value for generic errors.
940  */
941 static int vgic_its_alloc_collection(struct vgic_its *its,
942                                      struct its_collection **colp,
943                                      u32 coll_id)
944 {
945         struct its_collection *collection;
946 
947         collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT);
948         if (!collection)
949                 return -ENOMEM;
950 
951         collection->collection_id = coll_id;
952         collection->target_addr = COLLECTION_NOT_MAPPED;
953 
954         list_add_tail(&collection->coll_list, &its->collection_list);
955         *colp = collection;
956 
957         return 0;
958 }
959 
960 static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
961 {
962         struct its_collection *collection;
963         struct its_device *device;
964         struct its_ite *ite;
965 
966         /*
967          * Clearing the mapping for that collection ID removes the
968          * entry from the list. If there wasn't any before, we can
969          * go home early.
970          */
971         collection = find_collection(its, coll_id);
972         if (!collection)
973                 return;
974 
975         for_each_lpi_its(device, ite, its)
976                 if (ite->collection &&
977                     ite->collection->collection_id == coll_id)
978                         ite->collection = NULL;
979 
980         list_del(&collection->coll_list);
981         kfree(collection);
982 }
983 
984 /* Must be called with its_lock mutex held */
985 static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
986                                           struct its_collection *collection,
987                                           u32 event_id)
988 {
989         struct its_ite *ite;
990 
991         ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT);
992         if (!ite)
993                 return ERR_PTR(-ENOMEM);
994 
995         ite->event_id   = event_id;
996         ite->collection = collection;
997 
998         list_add_tail(&ite->ite_list, &device->itt_head);
999         return ite;
1000 }
1001 
1002 /*
1003  * The MAPTI and MAPI commands map LPIs to ITTEs.
1004  * Must be called with its_lock mutex held.
1005  */
1006 static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
1007                                     u64 *its_cmd)
1008 {
1009         u32 device_id = its_cmd_get_deviceid(its_cmd);
1010         u32 event_id = its_cmd_get_id(its_cmd);
1011         u32 coll_id = its_cmd_get_collection(its_cmd);
1012         struct its_ite *ite;
1013         struct kvm_vcpu *vcpu = NULL;
1014         struct its_device *device;
1015         struct its_collection *collection, *new_coll = NULL;
1016         struct vgic_irq *irq;
1017         int lpi_nr;
1018 
1019         device = find_its_device(its, device_id);
1020         if (!device)
1021                 return E_ITS_MAPTI_UNMAPPED_DEVICE;
1022 
1023         if (!vgic_its_check_event_id(its, device, event_id))
1024                 return E_ITS_MAPTI_ID_OOR;
1025 
1026         if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
1027                 lpi_nr = its_cmd_get_physical_id(its_cmd);
1028         else
1029                 lpi_nr = event_id;
1030         if (lpi_nr < GIC_LPI_OFFSET ||
1031             lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
1032                 return E_ITS_MAPTI_PHYSICALID_OOR;
1033 
1034         /* If there is an existing mapping, behavior is UNPREDICTABLE. */
1035         if (find_ite(its, device_id, event_id))
1036                 return 0;
1037 
1038         collection = find_collection(its, coll_id);
1039         if (!collection) {
1040                 int ret;
1041 
1042                 if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
1043                         return E_ITS_MAPC_COLLECTION_OOR;
1044 
1045                 ret = vgic_its_alloc_collection(its, &collection, coll_id);
1046                 if (ret)
1047                         return ret;
1048                 new_coll = collection;
1049         }
1050 
1051         ite = vgic_its_alloc_ite(device, collection, event_id);
1052         if (IS_ERR(ite)) {
1053                 if (new_coll)
1054                         vgic_its_free_collection(its, coll_id);
1055                 return PTR_ERR(ite);
1056         }
1057 
1058         if (its_is_collection_mapped(collection))
1059                 vcpu = collection_to_vcpu(kvm, collection);
1060 
1061         irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
1062         if (IS_ERR(irq)) {
1063                 if (new_coll)
1064                         vgic_its_free_collection(its, coll_id);
1065                 its_free_ite(kvm, ite);
1066                 return PTR_ERR(irq);
1067         }
1068         ite->irq = irq;
1069 
1070         return 0;
1071 }
1072 
1073 /* Requires the its_lock to be held. */
1074 static void vgic_its_free_device(struct kvm *kvm, struct vgic_its *its,
1075                                  struct its_device *device)
1076 {
1077         struct its_ite *ite, *temp;
1078 
1079         /*
1080          * The spec says that unmapping a device with still valid
1081          * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
1082          * since we cannot leave the memory unreferenced.
1083          */
1084         list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
1085                 its_free_ite(kvm, ite);
1086 
1087         vgic_its_invalidate_cache(its);
1088 
1089         list_del(&device->dev_list);
1090         kfree(device);
1091 }
1092 
1093 /* its lock must be held */
1094 static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
1095 {
1096         struct its_device *cur, *temp;
1097 
1098         list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
1099                 vgic_its_free_device(kvm, its, cur);
1100 }
1101 
1102 /* its lock must be held */
1103 static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
1104 {
1105         struct its_collection *cur, *temp;
1106 
1107         list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
1108                 vgic_its_free_collection(its, cur->collection_id);
1109 }
1110 
1111 /* Must be called with its_lock mutex held */
1112 static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
1113                                                 u32 device_id, gpa_t itt_addr,
1114                                                 u8 num_eventid_bits)
1115 {
1116         struct its_device *device;
1117 
1118         device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT);
1119         if (!device)
1120                 return ERR_PTR(-ENOMEM);
1121 
1122         device->device_id = device_id;
1123         device->itt_addr = itt_addr;
1124         device->num_eventid_bits = num_eventid_bits;
1125         INIT_LIST_HEAD(&device->itt_head);
1126 
1127         list_add_tail(&device->dev_list, &its->device_list);
1128         return device;
1129 }
1130 
1131 /*
1132  * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
1133  * Must be called with the its_lock mutex held.
1134  */
1135 static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
1136                                     u64 *its_cmd)
1137 {
1138         u32 device_id = its_cmd_get_deviceid(its_cmd);
1139         bool valid = its_cmd_get_validbit(its_cmd);
1140         u8 num_eventid_bits = its_cmd_get_size(its_cmd);
1141         gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
1142         struct its_device *device;
1143 
1144         if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
1145                 return E_ITS_MAPD_DEVICE_OOR;
1146 
1147         if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
1148                 return E_ITS_MAPD_ITTSIZE_OOR;
1149 
1150         device = find_its_device(its, device_id);
1151 
1152         /*
1153          * The spec says that calling MAPD on an already mapped device
1154          * invalidates all cached data for this device. We implement this
1155          * by removing the mapping and re-establishing it.
1156          */
1157         if (device)
1158                 vgic_its_free_device(kvm, its, device);
1159 
1160         /*
1161          * The spec does not say whether unmapping a not-mapped device
1162          * is an error, so we are done in any case.
1163          */
1164         if (!valid)
1165                 return 0;
1166 
1167         device = vgic_its_alloc_device(its, device_id, itt_addr,
1168                                        num_eventid_bits);
1169 
1170         return PTR_ERR_OR_ZERO(device);
1171 }
1172 
1173 /*
1174  * The MAPC command maps collection IDs to redistributors.
1175  * Must be called with the its_lock mutex held.
1176  */
1177 static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
1178                                     u64 *its_cmd)
1179 {
1180         u16 coll_id;
1181         struct its_collection *collection;
1182         bool valid;
1183 
1184         valid = its_cmd_get_validbit(its_cmd);
1185         coll_id = its_cmd_get_collection(its_cmd);
1186 
1187         if (!valid) {
1188                 vgic_its_free_collection(its, coll_id);
1189                 vgic_its_invalidate_cache(its);
1190         } else {
1191                 struct kvm_vcpu *vcpu;
1192 
1193                 vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
1194                 if (!vcpu)
1195                         return E_ITS_MAPC_PROCNUM_OOR;
1196 
1197                 collection = find_collection(its, coll_id);
1198 
1199                 if (!collection) {
1200                         int ret;
1201 
1202                         if (!vgic_its_check_id(its, its->baser_coll_table,
1203                                                 coll_id, NULL))
1204                                 return E_ITS_MAPC_COLLECTION_OOR;
1205 
1206                         ret = vgic_its_alloc_collection(its, &collection,
1207                                                         coll_id);
1208                         if (ret)
1209                                 return ret;
1210                         collection->target_addr = vcpu->vcpu_id;
1211                 } else {
1212                         collection->target_addr = vcpu->vcpu_id;
1213                         update_affinity_collection(kvm, its, collection);
1214                 }
1215         }
1216 
1217         return 0;
1218 }
1219 
1220 /*
1221  * The CLEAR command removes the pending state for a particular LPI.
1222  * Must be called with the its_lock mutex held.
1223  */
1224 static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
1225                                      u64 *its_cmd)
1226 {
1227         u32 device_id = its_cmd_get_deviceid(its_cmd);
1228         u32 event_id = its_cmd_get_id(its_cmd);
1229         struct its_ite *ite;
1230 
1231 
1232         ite = find_ite(its, device_id, event_id);
1233         if (!ite)
1234                 return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
1235 
1236         ite->irq->pending_latch = false;
1237 
1238         if (ite->irq->hw)
1239                 return irq_set_irqchip_state(ite->irq->host_irq,
1240                                              IRQCHIP_STATE_PENDING, false);
1241 
1242         return 0;
1243 }
1244 
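     /*
      * Re-read the configuration of a single LPI from the property table;
      * for a forwarded VLPI this also forwards an INV to the host ITS.
      */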
1245 int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
1246 {
1247         return update_lpi_config(kvm, irq, NULL, true);
1248 }
1249 
1250 /*
1251  * The INV command syncs the configuration bits from the memory table.
1252  * Must be called with the its_lock mutex held.
1253  */
1254 static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
1255                                    u64 *its_cmd)
1256 {
1257         u32 device_id = its_cmd_get_deviceid(its_cmd);
1258         u32 event_id = its_cmd_get_id(its_cmd);
1259         struct its_ite *ite;
1260 
1261 
1262         ite = find_ite(its, device_id, event_id);
1263         if (!ite)
1264                 return E_ITS_INV_UNMAPPED_INTERRUPT;
1265 
1266         return vgic_its_inv_lpi(kvm, ite->irq);
1267 }
1268 
1269 /**
1270  * vgic_its_invall - invalidate all LPIs targeting a given vcpu
1271  * @vcpu: the vcpu for which the RD is targeted by an invalidation
1272  *
1273  * Contrary to the INVALL command, this targets a RD instead of a
1274  * collection, and we don't need to hold the its_lock, since no ITS is
1275  * involved here.
1276  */
1277 int vgic_its_invall(struct kvm_vcpu *vcpu)
1278 {
1279         struct kvm *kvm = vcpu->kvm;
1280         struct vgic_dist *dist = &kvm->arch.vgic;
1281         struct vgic_irq *irq;
1282         unsigned long intid;
1283 
1284         xa_for_each(&dist->lpi_xa, intid, irq) {
1285                 irq = vgic_get_irq(kvm, NULL, intid);
1286                 if (!irq)
1287                         continue;
1288 
1289                 update_lpi_config(kvm, irq, vcpu, false);
1290                 vgic_put_irq(kvm, irq);
1291         }
1292 
1293         if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1294                 its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1295 
1296         return 0;
1297 }
1298 
1299 /*
1300  * The INVALL command requests flushing of all IRQ data in this collection.
1301  * Find the VCPU mapped to that collection, then iterate over the VM's list
1302  * of mapped LPIs and update the configuration for each IRQ which targets
1303  * the specified vcpu. The configuration will be read from the in-memory
1304  * configuration table.
1305  * Must be called with the its_lock mutex held.
1306  */
1307 static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
1308                                       u64 *its_cmd)
1309 {
1310         u32 coll_id = its_cmd_get_collection(its_cmd);
1311         struct its_collection *collection;
1312         struct kvm_vcpu *vcpu;
1313 
1314         collection = find_collection(its, coll_id);
1315         if (!its_is_collection_mapped(collection))
1316                 return E_ITS_INVALL_UNMAPPED_COLLECTION;
1317 
1318         vcpu = collection_to_vcpu(kvm, collection);
1319         vgic_its_invall(vcpu);
1320 
1321         return 0;
1322 }
1323 
1324 /*
1325  * The MOVALL command moves the pending state of all IRQs targeting one
1326  * redistributor to another. We don't hold the pending state in the VCPUs,
1327  * but in the IRQs instead, so there is really not much to do for us here.
1328  * However, the spec says that no IRQ must target the old redistributor
1329  * afterwards, so we make sure that no LPI is using the associated target_vcpu.
1330  * This command affects all LPIs in the system that target that redistributor.
1331  */
1332 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1333                                       u64 *its_cmd)
1334 {
1335         struct vgic_dist *dist = &kvm->arch.vgic;
1336         struct kvm_vcpu *vcpu1, *vcpu2;
1337         struct vgic_irq *irq;
1338         unsigned long intid;
1339 
1340         /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
1341         vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
1342         vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));
1343 
1344         if (!vcpu1 || !vcpu2)
1345                 return E_ITS_MOVALL_PROCNUM_OOR;
1346 
1347         if (vcpu1 == vcpu2)
1348                 return 0;
1349 
1350         xa_for_each(&dist->lpi_xa, intid, irq) {
1351                 irq = vgic_get_irq(kvm, NULL, intid);
1352                 if (!irq)
1353                         continue;
1354 
1355                 update_affinity(irq, vcpu2);
1356 
1357                 vgic_put_irq(kvm, irq);
1358         }
1359 
1360         vgic_its_invalidate_cache(its);
1361 
1362         return 0;
1363 }
1364 
1365 /*
1366  * The INT command injects the LPI associated with that DevID/EvID pair.
1367  * Must be called with the its_lock mutex held.
1368  */
1369 static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
1370                                    u64 *its_cmd)
1371 {
1372         u32 msi_data = its_cmd_get_id(its_cmd);
1373         u64 msi_devid = its_cmd_get_deviceid(its_cmd);
1374 
1375         return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
1376 }
1377 
1378 /*
1379  * This function is called with the its_cmd lock held, but the ITS data
1380  * structure lock dropped.
1381  */
1382 static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
1383                                    u64 *its_cmd)
1384 {
1385         int ret = -ENODEV;
1386 
1387         mutex_lock(&its->its_lock);
1388         switch (its_cmd_get_command(its_cmd)) {
1389         case GITS_CMD_MAPD:
1390                 ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
1391                 break;
1392         case GITS_CMD_MAPC:
1393                 ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
1394                 break;
1395         case GITS_CMD_MAPI:
1396                 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1397                 break;
1398         case GITS_CMD_MAPTI:
1399                 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1400                 break;
1401         case GITS_CMD_MOVI:
1402                 ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
1403                 break;
1404         case GITS_CMD_DISCARD:
1405                 ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
1406                 break;
1407         case GITS_CMD_CLEAR:
1408                 ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
1409                 break;
1410         case GITS_CMD_MOVALL:
1411                 ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
1412                 break;
1413         case GITS_CMD_INT:
1414                 ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
1415                 break;
1416         case GITS_CMD_INV:
1417                 ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
1418                 break;
1419         case GITS_CMD_INVALL:
1420                 ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
1421                 break;
1422         case GITS_CMD_SYNC:
1423                 /* we ignore this command: we are in sync all of the time */
1424                 ret = 0;
1425                 break;
1426         }
1427         mutex_unlock(&its->its_lock);
1428 
1429         return ret;
1430 }
1431 
1432 static u64 vgic_sanitise_its_baser(u64 reg)
1433 {
1434         reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
1435                                   GITS_BASER_SHAREABILITY_SHIFT,
1436                                   vgic_sanitise_shareability);
1437         reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
1438                                   GITS_BASER_INNER_CACHEABILITY_SHIFT,
1439                                   vgic_sanitise_inner_cacheability);
1440         reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
1441                                   GITS_BASER_OUTER_CACHEABILITY_SHIFT,
1442                                   vgic_sanitise_outer_cacheability);
1443 
1444         /* We support only one (ITS) page size: 64K */
1445         reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
1446 
1447         return reg;
1448 }
1449 
1450 static u64 vgic_sanitise_its_cbaser(u64 reg)
1451 {
1452         reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
1453                                   GITS_CBASER_SHAREABILITY_SHIFT,
1454                                   vgic_sanitise_shareability);
1455         reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
1456                                   GITS_CBASER_INNER_CACHEABILITY_SHIFT,
1457                                   vgic_sanitise_inner_cacheability);
1458         reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
1459                                   GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
1460                                   vgic_sanitise_outer_cacheability);
1461 
1462         /* Sanitise the physical address to be 64k aligned. */
1463         reg &= ~GENMASK_ULL(15, 12);
1464 
1465         return reg;
1466 }
1467 
1468 static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
1469                                                struct vgic_its *its,
1470                                                gpa_t addr, unsigned int len)
1471 {
1472         return extract_bytes(its->cbaser, addr & 7, len);
1473 }
1474 
1475 static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
1476                                        gpa_t addr, unsigned int len,
1477                                        unsigned long val)
1478 {
1479         /* When GITS_CTLR.Enable is 1, this register is RO. */
1480         if (its->enabled)
1481                 return;
1482 
1483         mutex_lock(&its->cmd_lock);
1484         its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
1485         its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
1486         its->creadr = 0;
1487         /*
1488          * CWRITER is architecturally UNKNOWN on reset, but we need to reset
1489          * it to CREADR to make sure we start with an empty command buffer.
1490          */
1491         its->cwriter = its->creadr;
1492         mutex_unlock(&its->cmd_lock);
1493 }
1494 
1495 #define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
1496 #define ITS_CMD_SIZE                    32
1497 #define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
1498 
1499 /* Must be called with the cmd_lock held. */
1500 static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1501 {
1502         gpa_t cbaser;
1503         u64 cmd_buf[4];
1504 
1505         /* Commands are only processed when the ITS is enabled. */
1506         if (!its->enabled)
1507                 return;
1508 
1509         cbaser = GITS_CBASER_ADDRESS(its->cbaser);
1510 
1511         while (its->cwriter != its->creadr) {
1512                 int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1513                                               cmd_buf, ITS_CMD_SIZE);
1514                 /*
1515                  * If kvm_read_guest_lock() fails, this could be due to the guest
1516                  * programming a bogus value in CBASER or something else going
1517                  * wrong from which we cannot easily recover.
1518                  * According to section 6.3.2 in the GICv3 spec we can just
1519                  * ignore that command then.
1520                  */
1521                 if (!ret)
1522                         vgic_its_handle_command(kvm, its, cmd_buf);
1523 
1524                 its->creadr += ITS_CMD_SIZE;
1525                 if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1526                         its->creadr = 0;
1527         }
1528 }
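The command queue geometry used above follows directly from the architecture: GITS_CBASER.Size (bits [7:0]) encodes the number of 4KB pages minus one, each command occupies 32 bytes, and CREADR/CWRITER carry a byte offset in bits [19:5]. A minimal standalone sketch of the same arithmetic, with hypothetical helper names (cmd_buffer_size(), cmd_advance()) that are not part of this file:

#include <stdint.h>
#include <stdio.h>

/* Queue size in bytes: (GITS_CBASER.Size + 1) 4KB pages, as ITS_CMD_BUFFER_SIZE() computes. */
static uint64_t cmd_buffer_size(uint64_t cbaser)
{
	return ((cbaser & 0xff) + 1) << 12;
}

/* Advance a CREADR/CWRITER offset by one 32-byte command, wrapping at the end. */
static uint64_t cmd_advance(uint64_t offset, uint64_t cbaser)
{
	offset += 32;
	return (offset == cmd_buffer_size(cbaser)) ? 0 : offset;
}

int main(void)
{
	uint64_t cbaser = 0x1;	/* Size = 1 -> two 4KB pages */

	printf("queue size: %llu bytes\n",
	       (unsigned long long)cmd_buffer_size(cbaser));
	printf("last slot wraps to offset %llu\n",
	       (unsigned long long)cmd_advance(8160, cbaser));
	return 0;
}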
1529 
1530 /*
1531  * By writing to CWRITER the guest announces new commands to be processed.
1532  * To avoid any races in the first place, we take the its_cmd lock, which
1533  * protects our ring buffer variables, so that there is only one user
1534  * per ITS handling commands at a given time.
1535  */
1536 static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1537                                         gpa_t addr, unsigned int len,
1538                                         unsigned long val)
1539 {
1540         u64 reg;
1541 
1542         if (!its)
1543                 return;
1544 
1545         mutex_lock(&its->cmd_lock);
1546 
1547         reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1548         reg = ITS_CMD_OFFSET(reg);
1549         if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1550                 mutex_unlock(&its->cmd_lock);
1551                 return;
1552         }
1553         its->cwriter = reg;
1554 
1555         vgic_its_process_commands(kvm, its);
1556 
1557         mutex_unlock(&its->cmd_lock);
1558 }
1559 
1560 static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
1561                                                 struct vgic_its *its,
1562                                                 gpa_t addr, unsigned int len)
1563 {
1564         return extract_bytes(its->cwriter, addr & 0x7, len);
1565 }
1566 
1567 static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
1568                                                struct vgic_its *its,
1569                                                gpa_t addr, unsigned int len)
1570 {
1571         return extract_bytes(its->creadr, addr & 0x7, len);
1572 }
1573 
1574 static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
1575                                               struct vgic_its *its,
1576                                               gpa_t addr, unsigned int len,
1577                                               unsigned long val)
1578 {
1579         u32 cmd_offset;
1580         int ret = 0;
1581 
1582         mutex_lock(&its->cmd_lock);
1583 
1584         if (its->enabled) {
1585                 ret = -EBUSY;
1586                 goto out;
1587         }
1588 
1589         cmd_offset = ITS_CMD_OFFSET(val);
1590         if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1591                 ret = -EINVAL;
1592                 goto out;
1593         }
1594 
1595         its->creadr = cmd_offset;
1596 out:
1597         mutex_unlock(&its->cmd_lock);
1598         return ret;
1599 }
1600 
1601 #define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
1602 static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
1603                                               struct vgic_its *its,
1604                                               gpa_t addr, unsigned int len)
1605 {
1606         u64 reg;
1607 
1608         switch (BASER_INDEX(addr)) {
1609         case 0:
1610                 reg = its->baser_device_table;
1611                 break;
1612         case 1:
1613                 reg = its->baser_coll_table;
1614                 break;
1615         default:
1616                 reg = 0;
1617                 break;
1618         }
1619 
1620         return extract_bytes(reg, addr & 7, len);
1621 }
1622 
1623 #define GITS_BASER_RO_MASK      (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
1624 static void vgic_mmio_write_its_baser(struct kvm *kvm,
1625                                       struct vgic_its *its,
1626                                       gpa_t addr, unsigned int len,
1627                                       unsigned long val)
1628 {
1629         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1630         u64 entry_size, table_type;
1631         u64 reg, *regptr, clearbits = 0;
1632 
1633         /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
1634         if (its->enabled)
1635                 return;
1636 
1637         switch (BASER_INDEX(addr)) {
1638         case 0:
1639                 regptr = &its->baser_device_table;
1640                 entry_size = abi->dte_esz;
1641                 table_type = GITS_BASER_TYPE_DEVICE;
1642                 break;
1643         case 1:
1644                 regptr = &its->baser_coll_table;
1645                 entry_size = abi->cte_esz;
1646                 table_type = GITS_BASER_TYPE_COLLECTION;
1647                 clearbits = GITS_BASER_INDIRECT;
1648                 break;
1649         default:
1650                 return;
1651         }
1652 
1653         reg = update_64bit_reg(*regptr, addr & 7, len, val);
1654         reg &= ~GITS_BASER_RO_MASK;
1655         reg &= ~clearbits;
1656 
1657         reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
1658         reg |= table_type << GITS_BASER_TYPE_SHIFT;
1659         reg = vgic_sanitise_its_baser(reg);
1660 
1661         *regptr = reg;
1662 
1663         if (!(reg & GITS_BASER_VALID)) {
1664                 /* Take the its_lock to prevent a race with a save/restore */
1665                 mutex_lock(&its->its_lock);
1666                 switch (table_type) {
1667                 case GITS_BASER_TYPE_DEVICE:
1668                         vgic_its_free_device_list(kvm, its);
1669                         break;
1670                 case GITS_BASER_TYPE_COLLECTION:
1671                         vgic_its_free_collection_list(kvm, its);
1672                         break;
1673                 }
1674                 mutex_unlock(&its->its_lock);
1675         }
1676 }
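As a rough illustration of what a guest writes into GITS_BASER<n> before the valid bit is honoured here, a hedged sketch follows. make_baser() is a hypothetical helper, assuming a flat (non-indirect), 64KiB-aligned table below 2^48; the handler above then sanitises the cacheability/shareability fields and forces the 64K page size regardless of what was written.

#include <stdint.h>

/* GITS_BASER<n>.Valid is bit 63; Size (bits [7:0]) holds the page count minus one. */
static uint64_t make_baser(uint64_t table_pa, unsigned int npages)
{
	return (1ULL << 63) |				/* Valid */
	       (table_pa & 0x0000fffffffff000ULL) |	/* physical address */
	       ((uint64_t)(npages - 1) & 0xff);		/* Size: pages - 1 */
}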
1677 
1678 static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
1679                                              struct vgic_its *its,
1680                                              gpa_t addr, unsigned int len)
1681 {
1682         u32 reg = 0;
1683 
1684         mutex_lock(&its->cmd_lock);
1685         if (its->creadr == its->cwriter)
1686                 reg |= GITS_CTLR_QUIESCENT;
1687         if (its->enabled)
1688                 reg |= GITS_CTLR_ENABLE;
1689         mutex_unlock(&its->cmd_lock);
1690 
1691         return reg;
1692 }
1693 
1694 static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1695                                      gpa_t addr, unsigned int len,
1696                                      unsigned long val)
1697 {
1698         mutex_lock(&its->cmd_lock);
1699 
1700         /*
1701          * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
1702          * device/collection BASER are invalid
1703          */
1704         if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1705                 (!(its->baser_device_table & GITS_BASER_VALID) ||
1706                  !(its->baser_coll_table & GITS_BASER_VALID) ||
1707                  !(its->cbaser & GITS_CBASER_VALID)))
1708                 goto out;
1709 
1710         its->enabled = !!(val & GITS_CTLR_ENABLE);
1711         if (!its->enabled)
1712                 vgic_its_invalidate_cache(its);
1713 
1714         /*
1715          * Try to process any pending commands. This function bails out early
1716          * if the ITS is disabled or no commands have been queued.
1717          */
1718         vgic_its_process_commands(kvm, its);
1719 
1720 out:
1721         mutex_unlock(&its->cmd_lock);
1722 }
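Putting the pieces together, the rough order of operations a guest driver performs against this emulation is: make the device/collection tables and the command queue valid, set GITS_CTLR.Enable, then announce commands by advancing GITS_CWRITER and wait for GITS_CREADR to catch up. Below is a compile-only sketch using the architectural register offsets; the MY_GITS_* names and MMIO helpers are hypothetical, and barriers and wrap handling are omitted.

#include <stdint.h>

#define MY_GITS_CTLR	0x0000
#define MY_GITS_CBASER	0x0080
#define MY_GITS_CWRITER	0x0088
#define MY_GITS_CREADR	0x0090
#define MY_GITS_BASER0	0x0100	/* device table */
#define MY_GITS_BASER1	0x0108	/* collection table */

static void write32(volatile uint8_t *b, unsigned long off, uint32_t v)
{
	*(volatile uint32_t *)(b + off) = v;
}

static void write64(volatile uint8_t *b, unsigned long off, uint64_t v)
{
	*(volatile uint64_t *)(b + off) = v;
}

static uint64_t read64(volatile uint8_t *b, unsigned long off)
{
	return *(volatile uint64_t *)(b + off);
}

static void its_enable_and_sync(volatile uint8_t *its, uint64_t dev_baser,
				uint64_t coll_baser, uint64_t cbaser)
{
	/* All three must have their Valid bit set, or Enable is refused above. */
	write64(its, MY_GITS_BASER0, dev_baser);
	write64(its, MY_GITS_BASER1, coll_baser);
	write64(its, MY_GITS_CBASER, cbaser);	/* also resets CREADR/CWRITER here */
	write32(its, MY_GITS_CTLR, 1);		/* GITS_CTLR.Enable */

	/* ... place one 32-byte command at CWRITER's offset in the queue, then: */
	write64(its, MY_GITS_CWRITER, read64(its, MY_GITS_CWRITER) + 32);
	while (read64(its, MY_GITS_CREADR) != read64(its, MY_GITS_CWRITER))
		;	/* this emulation consumes commands synchronously */
}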
1723 
1724 #define REGISTER_ITS_DESC(off, rd, wr, length, acc)             \
1725 {                                                               \
1726         .reg_offset = off,                                      \
1727         .len = length,                                          \
1728         .access_flags = acc,                                    \
1729         .its_read = rd,                                         \
1730         .its_write = wr,                                        \
1731 }
1732 
1733 #define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
1734 {                                                               \
1735         .reg_offset = off,                                      \
1736         .len = length,                                          \
1737         .access_flags = acc,                                    \
1738         .its_read = rd,                                         \
1739         .its_write = wr,                                        \
1740         .uaccess_its_write = uwr,                               \
1741 }
1742 
1743 static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1744                               gpa_t addr, unsigned int len, unsigned long val)
1745 {
1746         /* Ignore */
1747 }
1748 
1749 static struct vgic_register_region its_registers[] = {
1750         REGISTER_ITS_DESC(GITS_CTLR,
1751                 vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
1752                 VGIC_ACCESS_32bit),
1753         REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
1754                 vgic_mmio_read_its_iidr, its_mmio_write_wi,
1755                 vgic_mmio_uaccess_write_its_iidr, 4,
1756                 VGIC_ACCESS_32bit),
1757         REGISTER_ITS_DESC(GITS_TYPER,
1758                 vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
1759                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1760         REGISTER_ITS_DESC(GITS_CBASER,
1761                 vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
1762                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1763         REGISTER_ITS_DESC(GITS_CWRITER,
1764                 vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
1765                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1766         REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
1767                 vgic_mmio_read_its_creadr, its_mmio_write_wi,
1768                 vgic_mmio_uaccess_write_its_creadr, 8,
1769                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1770         REGISTER_ITS_DESC(GITS_BASER,
1771                 vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
1772                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1773         REGISTER_ITS_DESC(GITS_IDREGS_BASE,
1774                 vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
1775                 VGIC_ACCESS_32bit),
1776 };
1777 
1778 /* This is called on setting the LPI enable bit in the redistributor. */
1779 void vgic_enable_lpis(struct kvm_vcpu *vcpu)
1780 {
1781         if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1782                 its_sync_lpi_pending_table(vcpu);
1783 }
1784 
1785 static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1786                                    u64 addr)
1787 {
1788         struct vgic_io_device *iodev = &its->iodev;
1789         int ret;
1790 
1791         mutex_lock(&kvm->slots_lock);
1792         if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1793                 ret = -EBUSY;
1794                 goto out;
1795         }
1796 
1797         its->vgic_its_base = addr;
1798         iodev->regions = its_registers;
1799         iodev->nr_regions = ARRAY_SIZE(its_registers);
1800         kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
1801 
1802         iodev->base_addr = its->vgic_its_base;
1803         iodev->iodev_type = IODEV_ITS;
1804         iodev->its = its;
1805         ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
1806                                       KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
1807 out:
1808         mutex_unlock(&kvm->slots_lock);
1809 
1810         return ret;
1811 }
1812 
1813 #define INITIAL_BASER_VALUE                                               \
1814         (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)                | \
1815          GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)         | \
1816          GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)             | \
1817          GITS_BASER_PAGE_SIZE_64K)
1818 
1819 #define INITIAL_PROPBASER_VALUE                                           \
1820         (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)            | \
1821          GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)     | \
1822          GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
1823 
1824 static int vgic_its_create(struct kvm_device *dev, u32 type)
1825 {
1826         int ret;
1827         struct vgic_its *its;
1828 
1829         if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
1830                 return -ENODEV;
1831 
1832         its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
1833         if (!its)
1834                 return -ENOMEM;
1835 
1836         mutex_lock(&dev->kvm->arch.config_lock);
1837 
1838         if (vgic_initialized(dev->kvm)) {
1839                 ret = vgic_v4_init(dev->kvm);
1840                 if (ret < 0) {
1841                         mutex_unlock(&dev->kvm->arch.config_lock);
1842                         kfree(its);
1843                         return ret;
1844                 }
1845         }
1846 
1847         mutex_init(&its->its_lock);
1848         mutex_init(&its->cmd_lock);
1849 
1850         /* Yep, even more trickery for lock ordering... */
1851 #ifdef CONFIG_LOCKDEP
1852         mutex_lock(&its->cmd_lock);
1853         mutex_lock(&its->its_lock);
1854         mutex_unlock(&its->its_lock);
1855         mutex_unlock(&its->cmd_lock);
1856 #endif
1857 
1858         its->vgic_its_base = VGIC_ADDR_UNDEF;
1859 
1860         INIT_LIST_HEAD(&its->device_list);
1861         INIT_LIST_HEAD(&its->collection_list);
1862         xa_init(&its->translation_cache);
1863 
1864         dev->kvm->arch.vgic.msis_require_devid = true;
1865         dev->kvm->arch.vgic.has_its = true;
1866         its->enabled = false;
1867         its->dev = dev;
1868 
1869         its->baser_device_table = INITIAL_BASER_VALUE                   |
1870                 ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
1871         its->baser_coll_table = INITIAL_BASER_VALUE |
1872                 ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
1873         dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
1874 
1875         dev->private = its;
1876 
1877         ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1878 
1879         mutex_unlock(&dev->kvm->arch.config_lock);
1880 
1881         return ret;
1882 }
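From the VMM side, vgic_its_create() runs in response to KVM_CREATE_DEVICE and vgic_register_its_iodev() in response to the address attribute. A hedged userspace sketch (arm64 host assumed, error handling trimmed; create_its() is a hypothetical helper):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_its(int vm_fd, uint64_t gpa)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_ARM_VGIC_ITS,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_ITS_ADDR_TYPE,
		.addr  = (uint64_t)(uintptr_t)&gpa,	/* 64K-aligned base of the 128K ITS region */
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))	/* -> vgic_its_create() */
		return -1;
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))	/* -> vgic_register_its_iodev() */
		return -1;
	return cd.fd;
}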
1883 
1884 static void vgic_its_destroy(struct kvm_device *kvm_dev)
1885 {
1886         struct kvm *kvm = kvm_dev->kvm;
1887         struct vgic_its *its = kvm_dev->private;
1888 
1889         mutex_lock(&its->its_lock);
1890 
1891         vgic_its_free_device_list(kvm, its);
1892         vgic_its_free_collection_list(kvm, its);
1893         vgic_its_invalidate_cache(its);
1894         xa_destroy(&its->translation_cache);
1895 
1896         mutex_unlock(&its->its_lock);
1897         kfree(its);
1898         kfree(kvm_dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
1899 }
1900 
1901 static int vgic_its_has_attr_regs(struct kvm_device *dev,
1902                                   struct kvm_device_attr *attr)
1903 {
1904         const struct vgic_register_region *region;
1905         gpa_t offset = attr->attr;
1906         int align;
1907 
1908         align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1909 
1910         if (offset & align)
1911                 return -EINVAL;
1912 
1913         region = vgic_find_mmio_region(its_registers,
1914                                        ARRAY_SIZE(its_registers),
1915                                        offset);
1916         if (!region)
1917                 return -ENXIO;
1918 
1919         return 0;
1920 }
1921 
1922 static int vgic_its_attr_regs_access(struct kvm_device *dev,
1923                                      struct kvm_device_attr *attr,
1924                                      u64 *reg, bool is_write)
1925 {
1926         const struct vgic_register_region *region;
1927         struct vgic_its *its;
1928         gpa_t addr, offset;
1929         unsigned int len;
1930         int align, ret = 0;
1931 
1932         its = dev->private;
1933         offset = attr->attr;
1934 
1935         /*
1936          * Although the spec supports upper/lower 32-bit accesses to
1937          * 64-bit ITS registers, the userspace ABI requires 64-bit
1938          * accesses to all 64-bit wide registers. We therefore only
1939          * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
1940          * registers
1941          */
1942         if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
1943                 align = 0x3;
1944         else
1945                 align = 0x7;
1946 
1947         if (offset & align)
1948                 return -EINVAL;
1949 
1950         mutex_lock(&dev->kvm->lock);
1951 
1952         if (!lock_all_vcpus(dev->kvm)) {
1953                 mutex_unlock(&dev->kvm->lock);
1954                 return -EBUSY;
1955         }
1956 
1957         mutex_lock(&dev->kvm->arch.config_lock);
1958 
1959         if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1960                 ret = -ENXIO;
1961                 goto out;
1962         }
1963 
1964         region = vgic_find_mmio_region(its_registers,
1965                                        ARRAY_SIZE(its_registers),
1966                                        offset);
1967         if (!region) {
1968                 ret = -ENXIO;
1969                 goto out;
1970         }
1971 
1972         addr = its->vgic_its_base + offset;
1973 
1974         len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
1975 
1976         if (is_write) {
1977                 if (region->uaccess_its_write)
1978                         ret = region->uaccess_its_write(dev->kvm, its, addr,
1979                                                         len, *reg);
1980                 else
1981                         region->its_write(dev->kvm, its, addr, len, *reg);
1982         } else {
1983                 *reg = region->its_read(dev->kvm, its, addr, len);
1984         }
1985 out:
1986         mutex_unlock(&dev->kvm->arch.config_lock);
1987         unlock_all_vcpus(dev->kvm);
1988         mutex_unlock(&dev->kvm->lock);
1989         return ret;
1990 }
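For completeness, the userspace side of this access path looks roughly like the sketch below (its_reg_read() is a hypothetical helper): the register's byte offset within the GITS control frame goes in kvm_device_attr.attr and the data is always transferred through a u64, matching the 64-bit-access rule described in the comment above.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int its_reg_read(int its_fd, uint64_t offset, uint64_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
		.attr  = offset,	/* e.g. 0x0 for GITS_CTLR, 0x80 for GITS_CBASER */
		.addr  = (uint64_t)(uintptr_t)val,
	};

	/* Dispatched to vgic_its_attr_regs_access(..., is_write = false). */
	return ioctl(its_fd, KVM_GET_DEVICE_ATTR, &attr);
}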
1991 
1992 static u32 compute_next_devid_offset(struct list_head *h,
1993                                      struct its_device *dev)
1994 {
1995         struct its_device *next;
1996         u32 next_offset;
1997 
1998         if (list_is_last(&dev->dev_list, h))
1999                 return 0;
2000         next = list_next_entry(dev, dev_list);
2001         next_offset = next->device_id - dev->device_id;
2002 
2003         return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
2004 }
2005 
2006 static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
2007 {
2008         struct its_ite *next;
2009         u32 next_offset;
2010 
2011         if (list_is_last(&ite->ite_list, h))
2012                 return 0;
2013         next = list_next_entry(ite, ite_list);
2014         next_offset = next->event_id - ite->event_id;
2015 
2016         return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
2017 }
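/*
 * For reference, the v0 save/restore ABI (summarised here from
 * Documentation/virt/kvm/devices/arm-vgic-its.rst) lays out the 8-byte,
 * little-endian table entries handled below as:
 *
 *   DTE: | 63 | 62 ... 49 | 48 ... 5 | 4 ... 0 |
 *        | V  |   next    | ITT_addr |  Size   |
 *
 *   ITE: | 63 ... 48 | 47 ... 16 | 15 ... 0 |
 *        |   next    |  pINTID   |   ICID   |
 *
 *   CTE: | 63 | 62 ... 16 | 15 ... 0 |
 *        | V  |  RDBase   |   ICID   |  (upper bits of the RDBase field are RES0)
 *
 * 'next' is the ID offset to the following entry (0 marks the last one),
 * which is what the compute_next_*_offset() helpers above produce.
 */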
2018 
2019 /**
2020  * typedef entry_fn_t - Callback called on a table entry restore path
2021  * @its: its handle
2022  * @id: id of the entry
2023  * @entry: pointer to the entry
2024  * @opaque: pointer to opaque data passed to the callback
2025  *
2026  * Return: < 0 on error, 0 if last element was identified, id offset to next
2027  * element otherwise
2028  */
2029 typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
2030                           void *opaque);
2031 
2032 /**
2033  * scan_its_table - Scan a contiguous table in guest RAM and apply a function
2034  * to each entry
2035  *
2036  * @its: its handle
2037  * @base: base gpa of the table
2038  * @size: size of the table in bytes
2039  * @esz: entry size in bytes
2040  * @start_id: the ID of the first entry in the table
2041  * (non-zero for second level tables)
2042  * @fn: function to apply on each entry
2043  *
2044  * Return: < 0 on error, 0 if last element was identified, 1 otherwise
2045  * (the last element may not be found on second level tables)
2046  */
2047 static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
2048                           int start_id, entry_fn_t fn, void *opaque)
2049 {
2050         struct kvm *kvm = its->dev->kvm;
2051         unsigned long len = size;
2052         int id = start_id;
2053         gpa_t gpa = base;
2054         char entry[ESZ_MAX];
2055         int ret;
2056 
2057         memset(entry, 0, esz);
2058 
2059         while (true) {
2060                 int next_offset;
2061                 size_t byte_offset;
2062 
2063                 ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
2064                 if (ret)
2065                         return ret;
2066 
2067                 next_offset = fn(its, id, entry, opaque);
2068                 if (next_offset <= 0)
2069                         return next_offset;
2070 
2071                 byte_offset = next_offset * esz;
2072                 if (byte_offset >= len)
2073                         break;
2074 
2075                 id += next_offset;
2076                 gpa += byte_offset;
2077                 len -= byte_offset;
2078         }
2079         return 1;
2080 }
2081 
2082 /**
2083  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
2084  */
2085 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2086                               struct its_ite *ite, gpa_t gpa, int ite_esz)
2087 {
2088         struct kvm *kvm = its->dev->kvm;
2089         u32 next_offset;
2090         u64 val;
2091 
2092         next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
2093         val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
2094                ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
2095                 ite->collection->collection_id;
2096         val = cpu_to_le64(val);
2097         return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
2098 }
2099 
2100 /**
2101  * vgic_its_restore_ite - restore an interrupt translation entry
2102  * @event_id: id used for indexing
2103  * @ptr: pointer to the ITE entry
2104  * @opaque: pointer to the its_device
2105  */
2106 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
2107                                 void *ptr, void *opaque)
2108 {
2109         struct its_device *dev = opaque;
2110         struct its_collection *collection;
2111         struct kvm *kvm = its->dev->kvm;
2112         struct kvm_vcpu *vcpu = NULL;
2113         u64 val;
2114         u64 *p = (u64 *)ptr;
2115         struct vgic_irq *irq;
2116         u32 coll_id, lpi_id;
2117         struct its_ite *ite;
2118         u32 offset;
2119 
2120         val = *p;
2121 
2122         val = le64_to_cpu(val);
2123 
2124         coll_id = val & KVM_ITS_ITE_ICID_MASK;
2125         lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
2126 
2127         if (!lpi_id)
2128                 return 1; /* invalid entry, no choice but to scan next entry */
2129 
2130         if (lpi_id < VGIC_MIN_LPI)
2131                 return -EINVAL;
2132 
2133         offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
2134         if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
2135                 return -EINVAL;
2136 
2137         collection = find_collection(its, coll_id);
2138         if (!collection)
2139                 return -EINVAL;
2140 
2141         if (!vgic_its_check_event_id(its, dev, event_id))
2142                 return -EINVAL;
2143 
2144         ite = vgic_its_alloc_ite(dev, collection, event_id);
2145         if (IS_ERR(ite))
2146                 return PTR_ERR(ite);
2147 
2148         if (its_is_collection_mapped(collection))
2149                 vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);
2150 
2151         irq = vgic_add_lpi(kvm, lpi_id, vcpu);
2152         if (IS_ERR(irq)) {
2153                 its_free_ite(kvm, ite);
2154                 return PTR_ERR(irq);
2155         }
2156         ite->irq = irq;
2157 
2158         return offset;
2159 }
2160 
2161 static int vgic_its_ite_cmp(void *priv, const struct list_head *a,
2162                             const struct list_head *b)
2163 {
2164         struct its_ite *itea = container_of(a, struct its_ite, ite_list);
2165         struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
2166 
2167         if (itea->event_id < iteb->event_id)
2168                 return -1;
2169         else
2170                 return 1;
2171 }
2172 
2173 static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2174 {
2175         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2176         gpa_t base = device->itt_addr;
2177         struct its_ite *ite;
2178         int ret;
2179         int ite_esz = abi->ite_esz;
2180 
2181         list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
2182 
2183         list_for_each_entry(ite, &device->itt_head, ite_list) {
2184                 gpa_t gpa = base + ite->event_id * ite_esz;
2185 
2186                 /*
2187                  * If an LPI carries the HW bit, this means that this
2188                  * interrupt is controlled by GICv4, and we do not
2189                  * have direct access to that state without GICv4.1.
2190                  * Let's simply fail the save operation...
2191                  */
2192                 if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
2193                         return -EACCES;
2194 
2195                 ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2196                 if (ret)
2197                         return ret;
2198         }
2199         return 0;
2200 }
2201 
2202 /**
2203  * vgic_its_restore_itt - restore the ITT of a device
2204  *
2205  * @its: its handle
2206  * @dev: device handle
2207  *
2208  * Return 0 on success, < 0 on error
2209  */
2210 static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2211 {
2212         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2213         gpa_t base = dev->itt_addr;
2214         int ret;
2215         int ite_esz = abi->ite_esz;
2216         size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
2217 
2218         ret = scan_its_table(its, base, max_size, ite_esz, 0,
2219                              vgic_its_restore_ite, dev);
2220 
2221         /* scan_its_table returns +1 if all ITEs are invalid */
2222         if (ret > 0)
2223                 ret = 0;
2224 
2225         return ret;
2226 }
2227 
2228 /**
2229  * vgic_its_save_dte - Save a device table entry at a given GPA
2230  *
2231  * @its: ITS handle
2232  * @dev: ITS device
2233  * @ptr: GPA
2234  */
2235 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2236                              gpa_t ptr, int dte_esz)
2237 {
2238         struct kvm *kvm = its->dev->kvm;
2239         u64 val, itt_addr_field;
2240         u32 next_offset;
2241 
2242         itt_addr_field = dev->itt_addr >> 8;
2243         next_offset = compute_next_devid_offset(&its->device_list, dev);
2244         val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
2245                ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
2246                (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2247                 (dev->num_eventid_bits - 1));
2248         val = cpu_to_le64(val);
2249         return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
2250 }
2251 
2252 /**
2253  * vgic_its_restore_dte - restore a device table entry
2254  *
2255  * @its: its handle
2256  * @id: device id the DTE corresponds to
2257  * @ptr: kernel VA where the 8 byte DTE is located
2258  * @opaque: unused
2259  *
2260  * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2261  * next dte otherwise
2262  */
2263 static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2264                                 void *ptr, void *opaque)
2265 {
2266         struct its_device *dev;
2267         u64 baser = its->baser_device_table;
2268         gpa_t itt_addr;
2269         u8 num_eventid_bits;
2270         u64 entry = *(u64 *)ptr;
2271         bool valid;
2272         u32 offset;
2273         int ret;
2274 
2275         entry = le64_to_cpu(entry);
2276 
2277         valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2278         num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2279         itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2280                         >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2281 
2282         if (!valid)
2283                 return 1;
2284 
2285         /* dte entry is valid */
2286         offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2287 
2288         if (!vgic_its_check_id(its, baser, id, NULL))
2289                 return -EINVAL;
2290 
2291         dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2292         if (IS_ERR(dev))
2293                 return PTR_ERR(dev);
2294 
2295         ret = vgic_its_restore_itt(its, dev);
2296         if (ret) {
2297                 vgic_its_free_device(its->dev->kvm, its, dev);
2298                 return ret;
2299         }
2300 
2301         return offset;
2302 }
2303 
2304 static int vgic_its_device_cmp(void *priv, const struct list_head *a,
2305                                const struct list_head *b)
2306 {
2307         struct its_device *deva = container_of(a, struct its_device, dev_list);
2308         struct its_device *devb = container_of(b, struct its_device, dev_list);
2309 
2310         if (deva->device_id < devb->device_id)
2311                 return -1;
2312         else
2313                 return 1;
2314 }
2315 
2316 /**
2317  * vgic_its_save_device_tables - Save the device table and all ITT
2318  * into guest RAM
2319  *
2320  * L1/L2 handling is hidden by the vgic_its_check_id() helper, which
2321  * directly returns the GPA of the device entry
2322  */
2323 static int vgic_its_save_device_tables(struct vgic_its *its)
2324 {
2325         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2326         u64 baser = its->baser_device_table;
2327         struct its_device *dev;
2328         int dte_esz = abi->dte_esz;
2329 
2330         if (!(baser & GITS_BASER_VALID))
2331                 return 0;
2332 
2333         list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2334 
2335         list_for_each_entry(dev, &its->device_list, dev_list) {
2336                 int ret;
2337                 gpa_t eaddr;
2338 
2339                 if (!vgic_its_check_id(its, baser,
2340                                        dev->device_id, &eaddr))
2341                         return -EINVAL;
2342 
2343                 ret = vgic_its_save_itt(its, dev);
2344                 if (ret)
2345                         return ret;
2346 
2347                 ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2348                 if (ret)
2349                         return ret;
2350         }
2351         return 0;
2352 }
2353 
2354 /**
2355  * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2356  *
2357  * @its: its handle
2358  * @id: index of the entry in the L1 table
2359  * @addr: kernel VA
2360  * @opaque: unused
2361  *
2362  * L1 table entries are scanned one entry at a time.
2363  * Return < 0 on error, 0 if the last dte was found while scanning the L2
2364  * table, +1 otherwise (meaning the next L1 entry must be scanned)
2365  */
2366 static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2367                          void *opaque)
2368 {
2369         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2370         int l2_start_id = id * (SZ_64K / abi->dte_esz);
2371         u64 entry = *(u64 *)addr;
2372         int dte_esz = abi->dte_esz;
2373         gpa_t gpa;
2374         int ret;
2375 
2376         entry = le64_to_cpu(entry);
2377 
2378         if (!(entry & KVM_ITS_L1E_VALID_MASK))
2379                 return 1;
2380 
2381         gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2382 
2383         ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2384                              l2_start_id, vgic_its_restore_dte, NULL);
2385 
2386         return ret;
2387 }
2388 
2389 /**
2390  * vgic_its_restore_device_tables - Restore the device table and all ITT
2391  * from guest RAM to internal data structs
2392  */
2393 static int vgic_its_restore_device_tables(struct vgic_its *its)
2394 {
2395         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2396         u64 baser = its->baser_device_table;
2397         int l1_esz, ret;
2398         int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2399         gpa_t l1_gpa;
2400 
2401         if (!(baser & GITS_BASER_VALID))
2402                 return 0;
2403 
2404         l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
2405 
2406         if (baser & GITS_BASER_INDIRECT) {
2407                 l1_esz = GITS_LVL1_ENTRY_SIZE;
2408                 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2409                                      handle_l1_dte, NULL);
2410         } else {
2411                 l1_esz = abi->dte_esz;
2412                 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2413                                      vgic_its_restore_dte, NULL);
2414         }
2415 
2416         /* scan_its_table returns +1 if all entries are invalid */
2417         if (ret > 0)
2418                 ret = 0;
2419 
2420         if (ret < 0)
2421                 vgic_its_free_device_list(its->dev->kvm, its);
2422 
2423         return ret;
2424 }
2425 
2426 static int vgic_its_save_cte(struct vgic_its *its,
2427                              struct its_collection *collection,
2428                              gpa_t gpa, int esz)
2429 {
2430         u64 val;
2431 
2432         val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2433                ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2434                collection->collection_id);
2435         val = cpu_to_le64(val);
2436         return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
2437 }
2438 
2439 /*
2440  * Restore a collection entry into the ITS collection table.
2441  * Return +1 on success, 0 if the entry was invalid (which should be
2442  * interpreted as end-of-table), and a negative error value for generic errors.
2443  */
2444 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2445 {
2446         struct its_collection *collection;
2447         struct kvm *kvm = its->dev->kvm;
2448         u32 target_addr, coll_id;
2449         u64 val;
2450         int ret;
2451 
2452         BUG_ON(esz > sizeof(val));
2453         ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
2454         if (ret)
2455                 return ret;
2456         val = le64_to_cpu(val);
2457         if (!(val & KVM_ITS_CTE_VALID_MASK))
2458                 return 0;
2459 
2460         target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2461         coll_id = val & KVM_ITS_CTE_ICID_MASK;
2462 
2463         if (target_addr != COLLECTION_NOT_MAPPED &&
2464             !kvm_get_vcpu_by_id(kvm, target_addr))
2465                 return -EINVAL;
2466 
2467         collection = find_collection(its, coll_id);
2468         if (collection)
2469                 return -EEXIST;
2470 
2471         if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
2472                 return -EINVAL;
2473 
2474         ret = vgic_its_alloc_collection(its, &collection, coll_id);
2475         if (ret)
2476                 return ret;
2477         collection->target_addr = target_addr;
2478         return 1;
2479 }
2480 
2481 /**
2482  * vgic_its_save_collection_table - Save the collection table into
2483  * guest RAM
2484  */
2485 static int vgic_its_save_collection_table(struct vgic_its *its)
2486 {
2487         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2488         u64 baser = its->baser_coll_table;
2489         gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
2490         struct its_collection *collection;
2491         u64 val;
2492         size_t max_size, filled = 0;
2493         int ret, cte_esz = abi->cte_esz;
2494 
2495         if (!(baser & GITS_BASER_VALID))
2496                 return 0;
2497 
2498         max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2499 
2500         list_for_each_entry(collection, &its->collection_list, coll_list) {
2501                 ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2502                 if (ret)
2503                         return ret;
2504                 gpa += cte_esz;
2505                 filled += cte_esz;
2506         }
2507 
2508         if (filled == max_size)
2509                 return 0;
2510 
2511         /*
2512          * table is not fully filled, add a last dummy element
2513          * with valid bit unset
2514          */
2515         val = 0;
2516         BUG_ON(cte_esz > sizeof(val));
2517         ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
2518         return ret;
2519 }
2520 
2521 /**
2522  * vgic_its_restore_collection_table - reads the collection table
2523  * in guest memory and restores the ITS internal state. Requires the
2524  * BASER registers to have been restored beforehand.
2525  */
2526 static int vgic_its_restore_collection_table(struct vgic_its *its)
2527 {
2528         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2529         u64 baser = its->baser_coll_table;
2530         int cte_esz = abi->cte_esz;
2531         size_t max_size, read = 0;
2532         gpa_t gpa;
2533         int ret;
2534 
2535         if (!(baser & GITS_BASER_VALID))
2536                 return 0;
2537 
2538         gpa = GITS_BASER_ADDR_48_to_52(baser);
2539 
2540         max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2541 
2542         while (read < max_size) {
2543                 ret = vgic_its_restore_cte(its, gpa, cte_esz);
2544                 if (ret <= 0)
2545                         break;
2546                 gpa += cte_esz;
2547                 read += cte_esz;
2548         }
2549 
2550         if (ret > 0)
2551                 return 0;
2552 
2553         if (ret < 0)
2554                 vgic_its_free_collection_list(its->dev->kvm, its);
2555 
2556         return ret;
2557 }
2558 
2559 /**
2560  * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2561  * according to v0 ABI
2562  */
2563 static int vgic_its_save_tables_v0(struct vgic_its *its)
2564 {
2565         int ret;
2566 
2567         ret = vgic_its_save_device_tables(its);
2568         if (ret)
2569                 return ret;
2570 
2571         return vgic_its_save_collection_table(its);
2572 }
2573 
2574 /**
2575  * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2576  * to internal data structs according to V0 ABI
2577  *
2578  */
2579 static int vgic_its_restore_tables_v0(struct vgic_its *its)
2580 {
2581         int ret;
2582 
2583         ret = vgic_its_restore_collection_table(its);
2584         if (ret)
2585                 return ret;
2586 
2587         ret = vgic_its_restore_device_tables(its);
2588         if (ret)
2589                 vgic_its_free_collection_list(its->dev->kvm, its);
2590         return ret;
2591 }
2592 
2593 static int vgic_its_commit_v0(struct vgic_its *its)
2594 {
2595         const struct vgic_its_abi *abi;
2596 
2597         abi = vgic_its_get_abi(its);
2598         its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2599         its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2600 
2601         its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2602                                         << GITS_BASER_ENTRY_SIZE_SHIFT);
2603 
2604         its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2605                                         << GITS_BASER_ENTRY_SIZE_SHIFT);
2606         return 0;
2607 }
2608 
2609 static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2610 {
2611         /* We need to keep the ABI specific field values */
2612         its->baser_coll_table &= ~GITS_BASER_VALID;
2613         its->baser_device_table &= ~GITS_BASER_VALID;
2614         its->cbaser = 0;
2615         its->creadr = 0;
2616         its->cwriter = 0;
2617         its->enabled = 0;
2618         vgic_its_free_device_list(kvm, its);
2619         vgic_its_free_collection_list(kvm, its);
2620 }
2621 
2622 static int vgic_its_has_attr(struct kvm_device *dev,
2623                              struct kvm_device_attr *attr)
2624 {
2625         switch (attr->group) {
2626         case KVM_DEV_ARM_VGIC_GRP_ADDR:
2627                 switch (attr->attr) {
2628                 case KVM_VGIC_ITS_ADDR_TYPE:
2629                         return 0;
2630                 }
2631                 break;
2632         case KVM_DEV_ARM_VGIC_GRP_CTRL:
2633                 switch (attr->attr) {
2634                 case KVM_DEV_ARM_VGIC_CTRL_INIT:
2635                         return 0;
2636                 case KVM_DEV_ARM_ITS_CTRL_RESET:
2637                         return 0;
2638                 case KVM_DEV_ARM_ITS_SAVE_TABLES:
2639                         return 0;
2640                 case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2641                         return 0;
2642                 }
2643                 break;
2644         case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2645                 return vgic_its_has_attr_regs(dev, attr);
2646         }
2647         return -ENXIO;
2648 }
2649 
2650 static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2651 {
2652         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2653         int ret = 0;
2654 
2655         if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2656                 return 0;
2657 
2658         mutex_lock(&kvm->lock);
2659 
2660         if (!lock_all_vcpus(kvm)) {
2661                 mutex_unlock(&kvm->lock);
2662                 return -EBUSY;
2663         }
2664 
2665         mutex_lock(&kvm->arch.config_lock);
2666         mutex_lock(&its->its_lock);
2667 
2668         switch (attr) {
2669         case KVM_DEV_ARM_ITS_CTRL_RESET:
2670                 vgic_its_reset(kvm, its);
2671                 break;
2672         case KVM_DEV_ARM_ITS_SAVE_TABLES:
2673                 ret = abi->save_tables(its);
2674                 break;
2675         case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2676                 ret = abi->restore_tables(its);
2677                 break;
2678         }
2679 
2680         mutex_unlock(&its->its_lock);
2681         mutex_unlock(&kvm->arch.config_lock);
2682         unlock_all_vcpus(kvm);
2683         mutex_unlock(&kvm->lock);
2684         return ret;
2685 }
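The SAVE/RESTORE paths above are driven from userspace during migration. A hedged sketch of the saving side (its_save_tables() is a hypothetical helper; the VCPUs must not be inside KVM at the time, otherwise the vcpu locking above makes the ioctl fail with -EBUSY):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int its_save_tables(int its_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,	/* .addr is unused for this group */
	};

	/* Handled by vgic_its_ctrl() -> abi->save_tables(its). */
	return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
}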
2686 
2687 /*
2688  * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
2689  * without the running VCPU when dirty ring is enabled.
2690  *
2691  * The running VCPU is required to track dirty guest pages when dirty ring
2692  * is enabled. Otherwise, the backup bitmap should be used to track the
2693  * dirty guest pages. When the vgic/its tables are being saved, the backup
2694  * bitmap is used to track the guest pages dirtied during that window,
2695  * since no VCPU is running to fill its dirty ring.
2696  */
2697 bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
2698 {
2699         struct vgic_dist *dist = &kvm->arch.vgic;
2700 
2701         return dist->table_write_in_progress;
2702 }
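A VMM that uses the dirty ring therefore also needs the per-slot backup bitmap for these saves. The sketch below assumes the documented KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP capability is available and is enabled on the VM (see Documentation/virt/kvm/api.rst for the ordering constraints); the bitmap is later harvested with KVM_GET_DIRTY_LOG once the tables have been saved.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_dirty_ring_bitmap(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP,
	};

	/*
	 * Pages dirtied without a running VCPU (e.g. the ITS table save)
	 * then land in the memslot's backup bitmap instead of a dirty ring.
	 */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}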
2703 
2704 static int vgic_its_set_attr(struct kvm_device *dev,
2705                              struct kvm_device_attr *attr)
2706 {
2707         struct vgic_its *its = dev->private;
2708         int ret;
2709 
2710         switch (attr->group) {
2711         case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2712                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2713                 unsigned long type = (unsigned long)attr->attr;
2714                 u64 addr;
2715 
2716                 if (type != KVM_VGIC_ITS_ADDR_TYPE)
2717                         return -ENODEV;
2718 
2719                 if (copy_from_user(&addr, uaddr, sizeof(addr)))
2720                         return -EFAULT;
2721 
2722                 ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
2723                                          addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
2724                 if (ret)
2725                         return ret;
2726 
2727                 return vgic_register_its_iodev(dev->kvm, its, addr);
2728         }
2729         case KVM_DEV_ARM_VGIC_GRP_CTRL:
2730                 return vgic_its_ctrl(dev->kvm, its, attr->attr);
2731         case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2732                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2733                 u64 reg;
2734 
2735                 if (get_user(reg, uaddr))
2736                         return -EFAULT;
2737 
2738                 return vgic_its_attr_regs_access(dev, attr, &reg, true);
2739         }
2740         }
2741         return -ENXIO;
2742 }
2743 
2744 static int vgic_its_get_attr(struct kvm_device *dev,
2745                              struct kvm_device_attr *attr)
2746 {
2747         switch (attr->group) {
2748         case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2749                 struct vgic_its *its = dev->private;
2750                 u64 addr = its->vgic_its_base;
2751                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2752                 unsigned long type = (unsigned long)attr->attr;
2753 
2754                 if (type != KVM_VGIC_ITS_ADDR_TYPE)
2755                         return -ENODEV;
2756 
2757                 if (copy_to_user(uaddr, &addr, sizeof(addr)))
2758                         return -EFAULT;
2759                 break;
2760         }
2761         case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2762                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2763                 u64 reg;
2764                 int ret;
2765 
2766                 ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2767                 if (ret)
2768                         return ret;
2769                 return put_user(reg, uaddr);
2770         }
2771         default:
2772                 return -ENXIO;
2773         }
2774 
2775         return 0;
2776 }
2777 
2778 static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2779         .name = "kvm-arm-vgic-its",
2780         .create = vgic_its_create,
2781         .destroy = vgic_its_destroy,
2782         .set_attr = vgic_its_set_attr,
2783         .get_attr = vgic_its_get_attr,
2784         .has_attr = vgic_its_has_attr,
2785 };
2786 
2787 int kvm_vgic_register_its_device(void)
2788 {
2789         return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2790                                        KVM_DEV_TYPE_ARM_VGIC_ITS);
2791 }
2792 
