Linux/net/dsa/tag_sja1105.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>

#include "tag.h"
#include "tag_8021q.h"

#define SJA1105_NAME                            "sja1105"
#define SJA1110_NAME                            "sja1110"

/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH           BIT(15)

/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA           BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY             BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER           BIT(12)

/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x)           (((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x)          ((x) & GENMASK(3, 0))

/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x)        ((x) & GENMASK(11, 0))

#define SJA1110_RX_TRAILER_SWITCH_ID(x)         (((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x)          ((x) & GENMASK(3, 0))

/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x)               (((x) & GENMASK(8, 4)) >> 4)

/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC             BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS               BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC          BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER           BIT(11)

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x)               (((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x)          ((x) & GENMASK(7, 0))

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x)        ((x) & GENMASK(10, 0))

#define SJA1110_TX_TRAILER_TSTAMP_ID(x)         (((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x)              (((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x)          (((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x)         (((x) << 1) & GENMASK(11, 1))
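
/* Taken together, the macros above describe a 16-bit TX header (flag bits
 * 15..11, then either the trailer position or the priority/timestamp ID) and
 * a 32-bit TX trailer carrying the timestamp ID in bits 31:24, the priority
 * in bits 23:21, the switch ID in bits 15:12 and the destination port mask
 * in bits 11:1; sja1110_xmit() below assembles both.
 */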

#define SJA1110_META_TSTAMP_SIZE                10

#define SJA1110_HEADER_LEN                      4
#define SJA1110_RX_TRAILER_LEN                  13
#define SJA1110_TX_TRAILER_LEN                  4
#define SJA1110_MAX_PADDING_LEN                 15

struct sja1105_tagger_private {
        struct sja1105_tagger_data data; /* Must be first */
        /* Protects concurrent access to the meta state machine
         * from taggers running on multiple ports on SMP systems
         */
        spinlock_t meta_lock;
        struct sk_buff *stampable_skb;
        struct kthread_worker *xmit_worker;
};

static struct sja1105_tagger_private *
sja1105_tagger_private(struct dsa_switch *ds)
{
        return ds->tagger_data;
}

/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static bool sja1105_is_link_local(const struct sk_buff *skb)
{
        const struct ethhdr *hdr = eth_hdr(skb);
        u64 dmac = ether_addr_to_u64(hdr->h_dest);

        if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
                return false;
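        /* The filter constants and masks come from <linux/dsa/sja1105.h> and
         * are expected to match the standard link-local (01-80-C2-xx-xx-xx)
         * and PTP (01-1B-19-xx-xx-xx) multicast DMAC ranges.
         */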
        if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
                    SJA1105_LINKLOCAL_FILTER_A)
                return true;
        if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
                    SJA1105_LINKLOCAL_FILTER_B)
                return true;
        return false;
}

struct sja1105_meta {
        u64 tstamp;
        u64 dmac_byte_4;
        u64 dmac_byte_3;
        u64 source_port;
        u64 switch_id;
};

static void sja1105_meta_unpack(const struct sk_buff *skb,
                                struct sja1105_meta *meta)
{
        u8 *buf = skb_mac_header(skb) + ETH_HLEN;

        /* UM10944.pdf section 4.2.17 AVB Parameters:
         * Structure of the meta-data follow-up frame.
         * It is in network byte order, so there are no quirks
         * while unpacking the meta frame.
         *
         * Also SJA1105 E/T only populates bits 23:0 of the timestamp
         * whereas P/Q/R/S does 32 bits. Since the structure is the
         * same and the E/T puts zeroes in the high-order byte, use
         * a unified unpacking command for both device series.
         */
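        /* packing(pbuf, uval, startbit, endbit, pbuflen, op, quirks) comes
         * from <linux/packing.h>: with op == UNPACK and quirks == 0 it copies
         * bits startbit..endbit of the pbuflen-byte buffer into *uval.
         */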
        packing(buf,     &meta->tstamp,     31, 0, 4, UNPACK, 0);
        packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
        packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
        packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
        packing(buf + 7, &meta->switch_id,   7, 0, 1, UNPACK, 0);
}

static bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
        const struct ethhdr *hdr = eth_hdr(skb);
        u64 smac = ether_addr_to_u64(hdr->h_source);
        u64 dmac = ether_addr_to_u64(hdr->h_dest);

        if (smac != SJA1105_META_SMAC)
                return false;
        if (dmac != SJA1105_META_DMAC)
                return false;
        if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
                return false;
        return true;
}

/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
                                          struct sk_buff *skb)
{
        struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
        struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
        void (*xmit_work_fn)(struct kthread_work *work);
        struct sja1105_deferred_xmit_work *xmit_work;
        struct kthread_worker *xmit_worker;

        xmit_work_fn = tagger_data->xmit_work_fn;
        xmit_worker = priv->xmit_worker;

        if (!xmit_work_fn || !xmit_worker)
                return NULL;

        xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
        if (!xmit_work)
                return NULL;

        kthread_init_work(&xmit_work->work, xmit_work_fn);
        /* Increase refcount so the kfree_skb in dsa_user_xmit
         * won't really free the packet.
         */
        xmit_work->dp = dp;
        xmit_work->skb = skb_get(skb);

        kthread_queue_work(xmit_worker, &xmit_work->work);

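        /* Returning NULL makes dsa_user_xmit() drop its reference to the skb
         * (hence the skb_get() above); the queued work item keeps the frame
         * alive and transmits it over an SPI-installed management route.
         */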
        return NULL;
}

/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
 * bridge spanning ports of this switch might have.
 */
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_port *other_dp;
        u16 proto;

        /* Since VLAN awareness is global, then if this port is VLAN-unaware,
         * all ports are. Use the VLAN-unaware TPID used for tag_8021q.
         */
        if (!dsa_port_is_vlan_filtering(dp))
                return ETH_P_SJA1105;

        /* Port is VLAN-aware, so there is a bridge somewhere (a single one,
         * we're sure about that). It may not be on this port though, so we
         * need to find it.
         */
        dsa_switch_for_each_port(other_dp, ds) {
                struct net_device *br = dsa_port_bridge_dev_get(other_dp);

                if (!br)
                        continue;

                /* An error is returned only if CONFIG_BRIDGE_VLAN_FILTERING
                 * is disabled, which seems pointless to handle, as our port
                 * cannot become VLAN-aware in that case.
                 */
                br_vlan_get_proto(br, &proto);

                return proto;
        }

        WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

        return ETH_P_SJA1105;
}

static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
                                              struct net_device *netdev)
{
        struct dsa_port *dp = dsa_user_to_port(netdev);
        unsigned int bridge_num = dsa_port_bridge_num_get(dp);
        struct net_device *br = dsa_port_bridge_dev_get(dp);
        u16 tx_vid;

        /* If the port is under a VLAN-aware bridge, just slide the
         * VLAN-tagged packet into the FDB and hope for the best.
         * This works because we support a single VLAN-aware bridge
         * across the entire dst, and its VLANs cannot be shared with
         * any standalone port.
         */
        if (br_vlan_enabled(br))
                return skb;

        /* If the port is under a VLAN-unaware bridge, use an imprecise
         * TX VLAN that targets the bridge's entire broadcast domain,
         * instead of just the specific port.
         */
        tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);

        return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}

/* Transform untagged control packets into pvid-tagged control packets so that
 * all packets sent by this tagger are VLAN-tagged and we can configure the
 * switch to drop untagged packets coming from the DSA conduit.
 */
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
                                                    struct sk_buff *skb, u8 pcp)
{
        __be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
        struct vlan_ethhdr *hdr;

        /* If VLAN tag is in hwaccel area, move it to the payload
         * to deal with both cases uniformly and to ensure that
         * the VLANs are added in the right order.
         */
        if (unlikely(skb_vlan_tag_present(skb))) {
                skb = __vlan_hwaccel_push_inside(skb);
                if (!skb)
                        return NULL;
        }

        hdr = skb_vlan_eth_hdr(skb);

        /* If skb is already VLAN-tagged, leave that VLAN ID in place */
        if (hdr->h_vlan_proto == xmit_tpid)
                return skb;

        return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
                               SJA1105_DEFAULT_VLAN);
}

static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
                                    struct net_device *netdev)
{
        struct dsa_port *dp = dsa_user_to_port(netdev);
        u16 queue_mapping = skb_get_queue_mapping(skb);
        u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
        u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);

        if (skb->offload_fwd_mark)
                return sja1105_imprecise_xmit(skb, netdev);

        /* Transmitting management traffic does not rely upon switch tagging,
         * but instead SPI-installed management routes. Part 2 of this
         * is the .port_deferred_xmit driver callback.
         */
        if (unlikely(sja1105_is_link_local(skb))) {
                skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
                if (!skb)
                        return NULL;

                return sja1105_defer_xmit(dp, skb);
        }

        return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
                             ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}

static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
                                    struct net_device *netdev)
{
        struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
        struct dsa_port *dp = dsa_user_to_port(netdev);
        u16 queue_mapping = skb_get_queue_mapping(skb);
        u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
        u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
        __be32 *tx_trailer;
        __be16 *tx_header;
        int trailer_pos;

        if (skb->offload_fwd_mark)
                return sja1105_imprecise_xmit(skb, netdev);

        /* Transmitting control packets is done using in-band control
         * extensions, while data packets are transmitted using
         * tag_8021q TX VLANs.
         */
        if (likely(!sja1105_is_link_local(skb)))
                return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
                                     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

        skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
        if (!skb)
                return NULL;

        skb_push(skb, SJA1110_HEADER_LEN);

        dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

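        /* skb->data now points at the destination MAC, so skb->len is also
         * the trailer offset measured from the start of the frame, matching
         * the convention documented for TRAILER_POS on the RX side below.
         */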
        trailer_pos = skb->len;

        tx_header = dsa_etype_header_pos_tx(skb);
        tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

        tx_header[0] = htons(ETH_P_SJA1110);
        tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
                             SJA1110_TX_HEADER_HAS_TRAILER |
                             SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
        *tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
                                  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
                                  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
        if (clone) {
                u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

                tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
                *tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
        }

        return skb;
}

static void sja1105_transfer_meta(struct sk_buff *skb,
                                  const struct sja1105_meta *meta)
{
        struct ethhdr *hdr = eth_hdr(skb);

        hdr->h_dest[3] = meta->dmac_byte_3;
        hdr->h_dest[4] = meta->dmac_byte_4;
        SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}

/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
                                struct sja1105_meta *meta,
                                bool is_link_local,
                                bool is_meta)
{
        /* Step 1: A timestampable frame was received.
         * Buffer it until we get its meta frame.
         */
        if (is_link_local) {
                struct dsa_port *dp = dsa_user_to_port(skb->dev);
                struct sja1105_tagger_private *priv;
                struct dsa_switch *ds = dp->ds;

                priv = sja1105_tagger_private(ds);

                spin_lock(&priv->meta_lock);
                /* Was this a link-local frame instead of the meta
                 * that we were expecting?
                 */
                if (priv->stampable_skb) {
                        dev_err_ratelimited(ds->dev,
                                            "Expected meta frame, is %12llx "
                                            "in the DSA conduit multicast filter?\n",
                                            SJA1105_META_DMAC);
                        kfree_skb(priv->stampable_skb);
                }

                /* Hold a reference to avoid dsa_switch_rcv
                 * from freeing the skb.
                 */
                priv->stampable_skb = skb_get(skb);
                spin_unlock(&priv->meta_lock);

                /* Tell DSA we got nothing */
                return NULL;

        /* Step 2: The meta frame arrived.
         * Time to take the stampable skb out of the closet, annotate it
         * with the partial timestamp, and pretend that we received it
         * just now (basically masquerade the buffered frame as the meta
         * frame, which serves no further purpose).
         */
        } else if (is_meta) {
                struct dsa_port *dp = dsa_user_to_port(skb->dev);
                struct sja1105_tagger_private *priv;
                struct dsa_switch *ds = dp->ds;
                struct sk_buff *stampable_skb;

                priv = sja1105_tagger_private(ds);

                spin_lock(&priv->meta_lock);

                stampable_skb = priv->stampable_skb;
                priv->stampable_skb = NULL;

                /* Was this a meta frame instead of the link-local
                 * that we were expecting?
                 */
                if (!stampable_skb) {
                        dev_err_ratelimited(ds->dev,
                                            "Unexpected meta frame\n");
                        spin_unlock(&priv->meta_lock);
                        return NULL;
                }

                if (stampable_skb->dev != skb->dev) {
                        dev_err_ratelimited(ds->dev,
                                            "Meta frame on wrong port\n");
                        spin_unlock(&priv->meta_lock);
                        return NULL;
                }

                /* Free the meta frame and give DSA the buffered stampable_skb
                 * for further processing up the network stack.
                 */
                kfree_skb(skb);
                skb = stampable_skb;
                sja1105_transfer_meta(skb, meta);

                spin_unlock(&priv->meta_lock);
        }

        return skb;
}

static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
        u16 tpid = ntohs(eth_hdr(skb)->h_proto);

        return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
               skb_vlan_tag_present(skb);
}

static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
        return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}

static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
                                   struct net_device *netdev)
{
        int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
        struct sja1105_meta meta = {0};
        struct ethhdr *hdr;
        bool is_link_local;
        bool is_meta;

        hdr = eth_hdr(skb);
        is_link_local = sja1105_is_link_local(skb);
        is_meta = sja1105_is_meta_frame(skb);

        if (is_link_local) {
                /* Management traffic path. Switch embeds the switch ID and
                 * port ID into bytes of the destination MAC, courtesy of
                 * the incl_srcpt options.
                 */
                source_port = hdr->h_dest[3];
                switch_id = hdr->h_dest[4];
        } else if (is_meta) {
                sja1105_meta_unpack(skb, &meta);
                source_port = meta.source_port;
                switch_id = meta.switch_id;
        }

        /* Normal data plane traffic and link-local frames are tagged with
         * a tag_8021q VLAN which we have to strip
         */
        if (sja1105_skb_has_tag_8021q(skb))
                dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
        else if (source_port == -1 && switch_id == -1)
                /* Packets with no source information have no chance of
                 * getting accepted, drop them straight away.
                 */
                return NULL;

        skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
                                           vid, vbid);
        if (!skb->dev) {
                netdev_warn(netdev, "Couldn't decode source port\n");
                return NULL;
        }

        if (!is_link_local)
                dsa_default_offload_fwd_mark(skb);

        return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
                                              is_meta);
}

static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
        u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
        int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
        int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
        struct sja1105_tagger_data *tagger_data;
        struct net_device *conduit = skb->dev;
        struct dsa_port *cpu_dp;
        struct dsa_switch *ds;
        int i;

        cpu_dp = conduit->dsa_ptr;
        ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
        if (!ds) {
                net_err_ratelimited("%s: cannot find switch id %d\n",
                                    conduit->name, switch_id);
                return NULL;
        }

        tagger_data = sja1105_tagger_data(ds);
        if (!tagger_data->meta_tstamp_handler)
                return NULL;

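        /* Each metadata record is SJA1110_META_TSTAMP_SIZE (10) bytes long:
         * byte 0 holds the timestamp ID, the high nibble of byte 1 the source
         * port, bit 3 of byte 1 the direction, and bytes 2..9 a big-endian
         * 64-bit timestamp, as parsed below.
         */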
        for (i = 0; i <= n_ts; i++) {
                u8 ts_id, source_port, dir;
                u64 tstamp;

                ts_id = buf[0];
                source_port = (buf[1] & GENMASK(7, 4)) >> 4;
                dir = (buf[1] & BIT(3)) >> 3;
                tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

                tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
                                                 tstamp);

                buf += SJA1110_META_TSTAMP_SIZE;
        }

        /* Discard the meta frame, we've consumed the timestamps it contained */
        return NULL;
}

static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
                                                            int *source_port,
                                                            int *switch_id,
                                                            bool *host_only)
{
        u16 rx_header;

        if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
                return NULL;

        /* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
         * what we need because the caller has checked the EtherType (which is
         * located 2 bytes back) and we just need a pointer to the header that
         * comes afterwards.
         */
        rx_header = ntohs(*(__be16 *)skb->data);

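        /* HOST_ONLY marks a frame that was trapped to the CPU; sja1110_rcv()
         * uses this flag to skip dsa_default_offload_fwd_mark(), so the
         * bridge does not assume the hardware has already forwarded it.
         */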
        if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
                *host_only = true;

        if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
                return sja1110_rcv_meta(skb, rx_header);

        /* Timestamp frame, we have a trailer */
        if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
                int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
                u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
                u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
                u8 last_byte = rx_trailer[12];

                /* The timestamp is unaligned, so we need to use packing()
                 * to get it
                 */
                packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

                *source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
                *switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

                /* skb->len counts from skb->data, while start_of_padding
                 * counts from the destination MAC address. Right now skb->data
                 * is still as set by the DSA conduit, so to trim away the
                 * padding and trailer we need to account for the fact that
                 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
                 */
                if (pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN))
                        return NULL;
        /* Trap-to-host frame, no timestamp trailer */
        } else {
                *source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
                *switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
        }

        /* Advance skb->data past the DSA header */
        skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

        dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

        /* With skb->data in its final place, update the MAC header
         * so that eth_hdr() continues to work properly.
         */
        skb_set_mac_header(skb, -ETH_HLEN);

        return skb;
}

static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
                                   struct net_device *netdev)
{
        int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
        bool host_only = false;

        if (sja1110_skb_has_inband_control_extension(skb)) {
                skb = sja1110_rcv_inband_control_extension(skb, &source_port,
                                                           &switch_id,
                                                           &host_only);
                if (!skb)
                        return NULL;
        }

        /* Packets with in-band control extensions might still have RX VLANs */
        if (likely(sja1105_skb_has_tag_8021q(skb)))
                dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);

        skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
                                           vid, vbid);

        if (!skb->dev) {
                netdev_warn(netdev, "Couldn't decode source port\n");
                return NULL;
        }

        if (!host_only)
                dsa_default_offload_fwd_mark(skb);

        return skb;
}

static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
                                 int *offset)
{
        /* No tag added for management frames, all ok */
        if (unlikely(sja1105_is_link_local(skb)))
                return;

        dsa_tag_generic_flow_dissect(skb, proto, offset);
}

static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
                                 int *offset)
{
        /* Management frames have 2 DSA tags on RX, so the needed_headroom we
         * declared is fine for the generic dissector adjustment procedure.
         */
        if (unlikely(sja1105_is_link_local(skb)))
                return dsa_tag_generic_flow_dissect(skb, proto, offset);

        /* For the rest, there is a single DSA tag, the tag_8021q one */
        *offset = VLAN_HLEN;
        *proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}

static void sja1105_disconnect(struct dsa_switch *ds)
{
        struct sja1105_tagger_private *priv = ds->tagger_data;

        kthread_destroy_worker(priv->xmit_worker);
        kfree(priv);
        ds->tagger_data = NULL;
}

static int sja1105_connect(struct dsa_switch *ds)
{
        struct sja1105_tagger_private *priv;
        struct kthread_worker *xmit_worker;
        int err;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        spin_lock_init(&priv->meta_lock);

        xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
                                            ds->dst->index, ds->index);
        if (IS_ERR(xmit_worker)) {
                err = PTR_ERR(xmit_worker);
                kfree(priv);
                return err;
        }

        priv->xmit_worker = xmit_worker;
        ds->tagger_data = priv;

        return 0;
}

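/* needed_headroom is VLAN_HLEN because this tagger inserts at most one VLAN
 * tag on TX (either the tag_8021q VLAN or the pvid tag on control packets).
 */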
static const struct dsa_device_ops sja1105_netdev_ops = {
        .name = SJA1105_NAME,
        .proto = DSA_TAG_PROTO_SJA1105,
        .xmit = sja1105_xmit,
        .rcv = sja1105_rcv,
        .connect = sja1105_connect,
        .disconnect = sja1105_disconnect,
        .needed_headroom = VLAN_HLEN,
        .flow_dissect = sja1105_flow_dissect,
        .promisc_on_conduit = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105, SJA1105_NAME);

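/* sja1110_xmit() may additionally push a 4-byte in-band control extension
 * (the ETH_P_SJA1110 EtherType plus a 16-bit header), hence the larger
 * needed_headroom, and appends a trailer, so tailroom is also reserved for a
 * trailer plus worst-case padding.
 */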
static const struct dsa_device_ops sja1110_netdev_ops = {
        .name = SJA1110_NAME,
        .proto = DSA_TAG_PROTO_SJA1110,
        .xmit = sja1110_xmit,
        .rcv = sja1110_rcv,
        .connect = sja1105_connect,
        .disconnect = sja1105_disconnect,
        .flow_dissect = sja1110_flow_dissect,
        .needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
        .needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110, SJA1110_NAME);

static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
        &DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
        &DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_DESCRIPTION("DSA tag driver for NXP SJA1105 switches");
MODULE_LICENSE("GPL v2");

