TOMOYO Linux Cross Reference
Linux/arch/x86/platform/uv/uv_irq.c


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
        unsigned long           offset;
        int                     pnode;
};

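/*
 * Pack an IO-APIC style route entry (vector, destination APIC ID, fixed
 * delivery, unmasked) into a single 64-bit word and write it to the MMR of
 * the hub that sources this interrupt.
 */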
static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
{
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                     sizeof(unsigned long));

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->vector           = cfg->vector;
        entry->delivery_mode    = APIC_DELIVERY_MODE_FIXED;
        entry->dest_mode        = apic->dest_mode_logical;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = cfg->dest_apicid;

        uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
}

static void uv_noop(struct irq_data *data) { }

static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
                    bool force)
{
        struct irq_data *parent = data->parent_data;
        struct irq_cfg *cfg = irqd_cfg(data);
        int ret;

        ret = parent->chip->irq_set_affinity(parent, mask, force);
        if (ret >= 0) {
                uv_program_mmr(cfg, data->chip_data);
                vector_schedule_cleanup(cfg);
        }

        return ret;
}

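/*
 * The interrupt is enabled and disabled by programming or masking the hub MMR
 * when the domain is (de)activated, so the chip-level mask/unmask callbacks
 * are no-ops; EOI is the standard APIC ack.
 */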
static struct irq_chip uv_irq_chip = {
        .name                   = "UV-CORE",
        .irq_mask               = uv_noop,
        .irq_unmask             = uv_noop,
        .irq_eoi                = apic_ack_irq,
        .irq_set_affinity       = uv_set_irq_affinity,
};

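/*
 * Allocate a single UV interrupt: reserve a vector from the parent x86 vector
 * domain, record which hub MMR to program, and mark the irq either as
 * non-balanced (per-CPU affinity) or as movable from process context.
 */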
static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
                           unsigned int nr_irqs, void *arg)
{
        struct uv_irq_2_mmr_pnode *chip_data;
        struct irq_alloc_info *info = arg;
        struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
        int ret;

        if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
                return -EINVAL;

        chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
                                 irq_data_get_node(irq_data));
        if (!chip_data)
                return -ENOMEM;

        ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
        if (ret >= 0) {
                if (info->uv.limit == UV_AFFINITY_CPU)
                        irq_set_status_flags(virq, IRQ_NO_BALANCING);
                else
                        irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

                chip_data->pnode = uv_blade_to_pnode(info->uv.blade);
                chip_data->offset = info->uv.offset;
                irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
                                    handle_percpu_irq, NULL, info->uv.name);
        } else {
                kfree(chip_data);
        }

        return ret;
}

static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
                           unsigned int nr_irqs)
{
        struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

        BUG_ON(nr_irqs != 1);
        kfree(irq_data->chip_data);
        irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
        irq_clear_status_flags(virq, IRQ_NO_BALANCING);
        irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int uv_domain_activate(struct irq_domain *domain,
                              struct irq_data *irq_data, bool reserve)
{
        uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
        return 0;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void uv_domain_deactivate(struct irq_domain *domain,
                                 struct irq_data *irq_data)
{
        struct uv_irq_2_mmr_pnode *info = irq_data->chip_data;
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->mask = 1;

        /* Write the masked entry so the hub stops sending this MSI */
        uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
}

static const struct irq_domain_ops uv_domain_ops = {
        .alloc          = uv_domain_alloc,
        .free           = uv_domain_free,
        .activate       = uv_domain_activate,
        .deactivate     = uv_domain_deactivate,
};

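/*
 * Lazily create the UV irq domain on first use, stacked on top of the x86
 * vector domain; the mutex makes concurrent callers share one instance.
 */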
static struct irq_domain *uv_get_irq_domain(void)
{
        static struct irq_domain *uv_domain;
        static DEFINE_MUTEX(uv_lock);
        struct fwnode_handle *fn;

        mutex_lock(&uv_lock);
        if (uv_domain)
                goto out;

        fn = irq_domain_alloc_named_fwnode("UV-CORE");
        if (!fn)
                goto out;

        uv_domain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, fn,
                                                &uv_domain_ops, NULL);
        if (!uv_domain)
                irq_domain_free_fwnode(fn);
out:
        mutex_unlock(&uv_lock);

        return uv_domain;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
                 unsigned long mmr_offset, int limit)
{
        struct irq_alloc_info info;
        struct irq_domain *domain = uv_get_irq_domain();

        if (!domain)
                return -ENOMEM;

        init_irq_alloc_info(&info, cpumask_of(cpu));
        info.type = X86_IRQ_ALLOC_TYPE_UV;
        info.uv.limit = limit;
        info.uv.blade = mmr_blade;
        info.uv.offset = mmr_offset;
        info.uv.name = irq_name;

        return irq_domain_alloc_irqs(domain, 1,
                                     uv_blade_to_memory_nid(mmr_blade), &info);
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Pass in the irq number that was returned by uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
        irq_domain_free_irqs(irq, 1);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
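
/*
 * Illustrative usage sketch, not part of this file: a hypothetical UV driver
 * would allocate the interrupt with uv_setup_irq(), attach a handler with
 * request_irq() (from <linux/interrupt.h>), and release both again on
 * teardown.  The function names, the "example-uv" label and the cpu/blade/
 * MMR-offset values are placeholders supplied by such a caller, not anything
 * defined here.
 */
static irqreturn_t example_uv_interrupt(int irq, void *dev_id)
{
        /* Acknowledge and handle the device event here. */
        return IRQ_HANDLED;
}

static int example_uv_request(int cpu, int blade, unsigned long mmr_offset)
{
        int irq, ret;

        /* Allocate an irq/vector and program the hub MMR for this CPU. */
        irq = uv_setup_irq("example-uv", cpu, blade, mmr_offset,
                           UV_AFFINITY_CPU);
        if (irq < 0)
                return irq;

        ret = request_irq(irq, example_uv_interrupt, 0, "example-uv", NULL);
        if (ret) {
                uv_teardown_irq(irq);
                return ret;
        }

        return irq;
}

static void example_uv_release(int irq)
{
        free_irq(irq, NULL);
        uv_teardown_irq(irq);
}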
