~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/tipc/link.c

Version: ~ [ linux-6.11-rc3 ] ~ [ linux-6.10.4 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.45 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.104 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.164 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.223 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.281 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.319 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * net/tipc/link.c: TIPC link code
  3  *
  4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
  5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  6  * All rights reserved.
  7  *
  8  * Redistribution and use in source and binary forms, with or without
  9  * modification, are permitted provided that the following conditions are met:
 10  *
 11  * 1. Redistributions of source code must retain the above copyright
 12  *    notice, this list of conditions and the following disclaimer.
 13  * 2. Redistributions in binary form must reproduce the above copyright
 14  *    notice, this list of conditions and the following disclaimer in the
 15  *    documentation and/or other materials provided with the distribution.
 16  * 3. Neither the names of the copyright holders nor the names of its
 17  *    contributors may be used to endorse or promote products derived from
 18  *    this software without specific prior written permission.
 19  *
 20  * Alternatively, this software may be distributed under the terms of the
 21  * GNU General Public License ("GPL") version 2 as published by the Free
 22  * Software Foundation.
 23  *
 24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 34  * POSSIBILITY OF SUCH DAMAGE.
 35  */
 36 
 37 #include "core.h"
 38 #include "subscr.h"
 39 #include "link.h"
 40 #include "bcast.h"
 41 #include "socket.h"
 42 #include "name_distr.h"
 43 #include "discover.h"
 44 #include "netlink.h"
 45 #include "monitor.h"
 46 #include "trace.h"
 47 #include "crypto.h"
 48 
 49 #include <linux/pkt_sched.h>
 50 
 51 struct tipc_stats {
 52         u32 sent_pkts;
 53         u32 recv_pkts;
 54         u32 sent_states;
 55         u32 recv_states;
 56         u32 sent_probes;
 57         u32 recv_probes;
 58         u32 sent_nacks;
 59         u32 recv_nacks;
 60         u32 sent_acks;
 61         u32 sent_bundled;
 62         u32 sent_bundles;
 63         u32 recv_bundled;
 64         u32 recv_bundles;
 65         u32 retransmitted;
 66         u32 sent_fragmented;
 67         u32 sent_fragments;
 68         u32 recv_fragmented;
 69         u32 recv_fragments;
 70         u32 link_congs;         /* # port sends blocked by congestion */
 71         u32 deferred_recv;
 72         u32 duplicates;
 73         u32 max_queue_sz;       /* send queue size high water mark */
 74         u32 accu_queue_sz;      /* used for send queue size profiling */
 75         u32 queue_sz_counts;    /* used for send queue size profiling */
 76         u32 msg_length_counts;  /* used for message length profiling */
 77         u32 msg_lengths_total;  /* used for message length profiling */
 78         u32 msg_length_profile[7]; /* used for msg. length profiling */
 79 };
 80 
 81 /**
 82  * struct tipc_link - TIPC link data structure
 83  * @addr: network address of link's peer node
 84  * @name: link name character string
 85  * @net: pointer to namespace struct
 86  * @peer_session: link session # being used by peer end of link
 87  * @peer_bearer_id: bearer id used by link's peer endpoint
 88  * @bearer_id: local bearer id used by link
 89  * @tolerance: minimum link continuity loss needed to reset link [in ms]
 90  * @abort_limit: # of unacknowledged continuity probes needed to reset link
 91  * @state: current state of link FSM
 92  * @peer_caps: bitmap describing capabilities of peer node
 93  * @silent_intv_cnt: # of timer intervals without any reception from peer
 94  * @priority: current link priority
 95  * @net_plane: current link network plane ('A' through 'H')
 96  * @mon_state: cookie with information needed by link monitor
 97  * @mtu: current maximum packet size for this link
 98  * @advertised_mtu: advertised own mtu when link is being established
 99  * @backlogq: queue for messages waiting to be sent
100  * @ackers: # of peers that needs to ack each packet before it can be released
101  * @acked: # last packet acked by a certain peer. Used for broadcast.
102  * @rcv_nxt: next sequence number to expect for inbound messages
103  * @inputq: buffer queue for messages to be delivered upwards
104  * @namedq: buffer queue for name table messages to be delivered upwards
105  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
106  * @reasm_buf: head of partially reassembled inbound message fragments
107  * @stats: collects statistics regarding link activity
108  * @session: session to be used by link
109  * @snd_nxt_state: next send seq number
110  * @rcv_nxt_state: next rcv seq number
111  * @in_session: have received ACTIVATE_MSG from peer
112  * @active: link is active
113  * @if_name: associated interface name
114  * @rst_cnt: link reset counter
115  * @drop_point: seq number for failover handling (FIXME)
116  * @failover_reasm_skb: saved failover msg ptr (FIXME)
117  * @failover_deferdq: deferred message queue for failover processing (FIXME)
118  * @transmq: the link's transmit queue
119  * @backlog: link's backlog by priority (importance)
120  * @snd_nxt: next sequence number to be used
121  * @rcv_unacked: # messages read by user, but not yet acked back to peer
122  * @deferdq: deferred receive queue
123  * @window: sliding window size for congestion handling
124  * @min_win: minimal send window to be used by link
125  * @ssthresh: slow start threshold for congestion handling
126  * @max_win: maximal send window to be used by link
127  * @cong_acks: congestion acks for congestion avoidance (FIXME)
128  * @checkpoint: seq number for congestion window size handling
129  * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
130  * @last_gap: last gap ack blocks for bcast (FIXME)
131  * @last_ga: ptr to gap ack blocks
132  * @bc_rcvlink: the peer specific link used for broadcast reception
133  * @bc_sndlink: the namespace global link used for broadcast sending
134  * @nack_state: bcast nack state
135  * @bc_peer_is_up: peer has acked the bcast init msg
136  */
137 struct tipc_link {
138         u32 addr;
139         char name[TIPC_MAX_LINK_NAME];
140         struct net *net;
141 
142         /* Management and link supervision data */
143         u16 peer_session;
144         u16 session;
145         u16 snd_nxt_state;
146         u16 rcv_nxt_state;
147         u32 peer_bearer_id;
148         u32 bearer_id;
149         u32 tolerance;
150         u32 abort_limit;
151         u32 state;
152         u16 peer_caps;
153         bool in_session;
154         bool active;
155         u32 silent_intv_cnt;
156         char if_name[TIPC_MAX_IF_NAME];
157         u32 priority;
158         char net_plane;
159         struct tipc_mon_state mon_state;
160         u16 rst_cnt;
161 
162         /* Failover/synch */
163         u16 drop_point;
164         struct sk_buff *failover_reasm_skb;
165         struct sk_buff_head failover_deferdq;
166 
167         /* Max packet negotiation */
168         u16 mtu;
169         u16 advertised_mtu;
170 
171         /* Sending */
172         struct sk_buff_head transmq;
173         struct sk_buff_head backlogq;
174         struct {
175                 u16 len;
176                 u16 limit;
177                 struct sk_buff *target_bskb;
178         } backlog[5];
179         u16 snd_nxt;
180 
181         /* Reception */
182         u16 rcv_nxt;
183         u32 rcv_unacked;
184         struct sk_buff_head deferdq;
185         struct sk_buff_head *inputq;
186         struct sk_buff_head *namedq;
187 
188         /* Congestion handling */
189         struct sk_buff_head wakeupq;
190         u16 window;
191         u16 min_win;
192         u16 ssthresh;
193         u16 max_win;
194         u16 cong_acks;
195         u16 checkpoint;
196 
197         /* Fragmentation/reassembly */
198         struct sk_buff *reasm_buf;
199         struct sk_buff *reasm_tnlmsg;
200 
201         /* Broadcast */
202         u16 ackers;
203         u16 acked;
204         u16 last_gap;
205         struct tipc_gap_ack_blks *last_ga;
206         struct tipc_link *bc_rcvlink;
207         struct tipc_link *bc_sndlink;
208         u8 nack_state;
209         bool bc_peer_is_up;
210 
211         /* Statistics */
212         struct tipc_stats stats;
213 };
214 
215 /*
216  * Error message prefixes
217  */
218 static const char *link_co_err = "Link tunneling error, ";
219 static const char *link_rst_msg = "Resetting link ";
220 
221 /* Send states for broadcast NACKs
222  */
223 enum {
224         BC_NACK_SND_CONDITIONAL,
225         BC_NACK_SND_UNCONDITIONAL,
226         BC_NACK_SND_SUPPRESS,
227 };
228 
229 #define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
230 #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
231 
232 /* Link FSM states:
233  */
234 enum {
235         LINK_ESTABLISHED     = 0xe,
236         LINK_ESTABLISHING    = 0xe  << 4,
237         LINK_RESET           = 0x1  << 8,
238         LINK_RESETTING       = 0x2  << 12,
239         LINK_PEER_RESET      = 0xd  << 16,
240         LINK_FAILINGOVER     = 0xf  << 20,
241         LINK_SYNCHING        = 0xc  << 24
242 };
243 
244 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
245                                struct sk_buff_head *xmitq);
246 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
247                                       bool probe_reply, u16 rcvgap,
248                                       int tolerance, int priority,
249                                       struct sk_buff_head *xmitq);
250 static void link_print(struct tipc_link *l, const char *str);
251 static int tipc_link_build_nack_msg(struct tipc_link *l,
252                                     struct sk_buff_head *xmitq);
253 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
254                                         struct sk_buff_head *xmitq);
255 static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
256                                     struct tipc_link *l, u8 start_index);
257 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
258 static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
259                                      u16 acked, u16 gap,
260                                      struct tipc_gap_ack_blks *ga,
261                                      struct sk_buff_head *xmitq,
262                                      bool *retransmitted, int *rc);
263 static void tipc_link_update_cwin(struct tipc_link *l, int released,
264                                   bool retransmitted);
265 /*
266  *  Simple non-static link routines (i.e. referenced outside this file)
267  */
268 bool tipc_link_is_up(struct tipc_link *l)
269 {
270         return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
271 }
272 
273 bool tipc_link_peer_is_down(struct tipc_link *l)
274 {
275         return l->state == LINK_PEER_RESET;
276 }
277 
278 bool tipc_link_is_reset(struct tipc_link *l)
279 {
280         return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
281 }
282 
283 bool tipc_link_is_establishing(struct tipc_link *l)
284 {
285         return l->state == LINK_ESTABLISHING;
286 }
287 
288 bool tipc_link_is_synching(struct tipc_link *l)
289 {
290         return l->state == LINK_SYNCHING;
291 }
292 
293 bool tipc_link_is_failingover(struct tipc_link *l)
294 {
295         return l->state == LINK_FAILINGOVER;
296 }
297 
298 bool tipc_link_is_blocked(struct tipc_link *l)
299 {
300         return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
301 }
302 
303 static bool link_is_bc_sndlink(struct tipc_link *l)
304 {
305         return !l->bc_sndlink;
306 }
307 
308 static bool link_is_bc_rcvlink(struct tipc_link *l)
309 {
310         return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
311 }
312 
313 void tipc_link_set_active(struct tipc_link *l, bool active)
314 {
315         l->active = active;
316 }
317 
318 u32 tipc_link_id(struct tipc_link *l)
319 {
320         return l->peer_bearer_id << 16 | l->bearer_id;
321 }
322 
323 int tipc_link_min_win(struct tipc_link *l)
324 {
325         return l->min_win;
326 }
327 
328 int tipc_link_max_win(struct tipc_link *l)
329 {
330         return l->max_win;
331 }
332 
333 int tipc_link_prio(struct tipc_link *l)
334 {
335         return l->priority;
336 }
337 
338 unsigned long tipc_link_tolerance(struct tipc_link *l)
339 {
340         return l->tolerance;
341 }
342 
343 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
344 {
345         return l->inputq;
346 }
347 
348 char tipc_link_plane(struct tipc_link *l)
349 {
350         return l->net_plane;
351 }
352 
353 struct net *tipc_link_net(struct tipc_link *l)
354 {
355         return l->net;
356 }
357 
358 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
359 {
360         l->peer_caps = capabilities;
361 }
362 
363 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
364                            struct tipc_link *uc_l,
365                            struct sk_buff_head *xmitq)
366 {
367         struct tipc_link *rcv_l = uc_l->bc_rcvlink;
368 
369         snd_l->ackers++;
370         rcv_l->acked = snd_l->snd_nxt - 1;
371         snd_l->state = LINK_ESTABLISHED;
372         tipc_link_build_bc_init_msg(uc_l, xmitq);
373 }
374 
375 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
376                               struct tipc_link *rcv_l,
377                               struct sk_buff_head *xmitq)
378 {
379         u16 ack = snd_l->snd_nxt - 1;
380 
381         snd_l->ackers--;
382         rcv_l->bc_peer_is_up = true;
383         rcv_l->state = LINK_ESTABLISHED;
384         tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
385         trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
386         tipc_link_reset(rcv_l);
387         rcv_l->state = LINK_RESET;
388         if (!snd_l->ackers) {
389                 trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
390                 tipc_link_reset(snd_l);
391                 snd_l->state = LINK_RESET;
392                 __skb_queue_purge(xmitq);
393         }
394 }
395 
396 int tipc_link_bc_peers(struct tipc_link *l)
397 {
398         return l->ackers;
399 }
400 
401 static u16 link_bc_rcv_gap(struct tipc_link *l)
402 {
403         struct sk_buff *skb = skb_peek(&l->deferdq);
404         u16 gap = 0;
405 
406         if (more(l->snd_nxt, l->rcv_nxt))
407                 gap = l->snd_nxt - l->rcv_nxt;
408         if (skb)
409                 gap = buf_seqno(skb) - l->rcv_nxt;
410         return gap;
411 }
412 
413 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
414 {
415         l->mtu = mtu;
416 }
417 
418 int tipc_link_mtu(struct tipc_link *l)
419 {
420         return l->mtu;
421 }
422 
423 int tipc_link_mss(struct tipc_link *l)
424 {
425 #ifdef CONFIG_TIPC_CRYPTO
426         return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
427 #else
428         return l->mtu - INT_H_SIZE;
429 #endif
430 }
431 
432 u16 tipc_link_rcv_nxt(struct tipc_link *l)
433 {
434         return l->rcv_nxt;
435 }
436 
437 u16 tipc_link_acked(struct tipc_link *l)
438 {
439         return l->acked;
440 }
441 
442 char *tipc_link_name(struct tipc_link *l)
443 {
444         return l->name;
445 }
446 
447 u32 tipc_link_state(struct tipc_link *l)
448 {
449         return l->state;
450 }
451 
452 /**
453  * tipc_link_create - create a new link
454  * @net: pointer to associated network namespace
455  * @if_name: associated interface name
456  * @bearer_id: id (index) of associated bearer
457  * @tolerance: link tolerance to be used by link
458  * @net_plane: network plane (A,B,c..) this link belongs to
459  * @mtu: mtu to be advertised by link
460  * @priority: priority to be used by link
461  * @min_win: minimal send window to be used by link
462  * @max_win: maximal send window to be used by link
463  * @session: session to be used by link
464  * @peer: node id of peer node
465  * @peer_caps: bitmap describing peer node capabilities
466  * @bc_sndlink: the namespace global link used for broadcast sending
467  * @bc_rcvlink: the peer specific link used for broadcast reception
468  * @inputq: queue to put messages ready for delivery
469  * @namedq: queue to put binding table update messages ready for delivery
470  * @link: return value, pointer to put the created link
471  * @self: local unicast link id
472  * @peer_id: 128-bit ID of peer
473  *
474  * Return: true if link was created, otherwise false
475  */
476 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
477                       int tolerance, char net_plane, u32 mtu, int priority,
478                       u32 min_win, u32 max_win, u32 session, u32 self,
479                       u32 peer, u8 *peer_id, u16 peer_caps,
480                       struct tipc_link *bc_sndlink,
481                       struct tipc_link *bc_rcvlink,
482                       struct sk_buff_head *inputq,
483                       struct sk_buff_head *namedq,
484                       struct tipc_link **link)
485 {
486         char peer_str[NODE_ID_STR_LEN] = {0,};
487         char self_str[NODE_ID_STR_LEN] = {0,};
488         struct tipc_link *l;
489 
490         l = kzalloc(sizeof(*l), GFP_ATOMIC);
491         if (!l)
492                 return false;
493         *link = l;
494         l->session = session;
495 
496         /* Set link name for unicast links only */
497         if (peer_id) {
498                 tipc_nodeid2string(self_str, tipc_own_id(net));
499                 if (strlen(self_str) > 16)
500                         sprintf(self_str, "%x", self);
501                 tipc_nodeid2string(peer_str, peer_id);
502                 if (strlen(peer_str) > 16)
503                         sprintf(peer_str, "%x", peer);
504         }
505         /* Peer i/f name will be completed by reset/activate message */
506         snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
507                  self_str, if_name, peer_str);
508 
509         strcpy(l->if_name, if_name);
510         l->addr = peer;
511         l->peer_caps = peer_caps;
512         l->net = net;
513         l->in_session = false;
514         l->bearer_id = bearer_id;
515         l->tolerance = tolerance;
516         if (bc_rcvlink)
517                 bc_rcvlink->tolerance = tolerance;
518         l->net_plane = net_plane;
519         l->advertised_mtu = mtu;
520         l->mtu = mtu;
521         l->priority = priority;
522         tipc_link_set_queue_limits(l, min_win, max_win);
523         l->ackers = 1;
524         l->bc_sndlink = bc_sndlink;
525         l->bc_rcvlink = bc_rcvlink;
526         l->inputq = inputq;
527         l->namedq = namedq;
528         l->state = LINK_RESETTING;
529         __skb_queue_head_init(&l->transmq);
530         __skb_queue_head_init(&l->backlogq);
531         __skb_queue_head_init(&l->deferdq);
532         __skb_queue_head_init(&l->failover_deferdq);
533         skb_queue_head_init(&l->wakeupq);
534         skb_queue_head_init(l->inputq);
535         return true;
536 }
537 
538 /**
539  * tipc_link_bc_create - create new link to be used for broadcast
540  * @net: pointer to associated network namespace
541  * @mtu: mtu to be used initially if no peers
542  * @min_win: minimal send window to be used by link
543  * @max_win: maximal send window to be used by link
544  * @inputq: queue to put messages ready for delivery
545  * @namedq: queue to put binding table update messages ready for delivery
546  * @link: return value, pointer to put the created link
547  * @ownnode: identity of own node
548  * @peer: node id of peer node
549  * @peer_id: 128-bit ID of peer
550  * @peer_caps: bitmap describing peer node capabilities
551  * @bc_sndlink: the namespace global link used for broadcast sending
552  *
553  * Return: true if link was created, otherwise false
554  */
555 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
556                          int mtu, u32 min_win, u32 max_win, u16 peer_caps,
557                          struct sk_buff_head *inputq,
558                          struct sk_buff_head *namedq,
559                          struct tipc_link *bc_sndlink,
560                          struct tipc_link **link)
561 {
562         struct tipc_link *l;
563 
564         if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
565                               max_win, 0, ownnode, peer, NULL, peer_caps,
566                               bc_sndlink, NULL, inputq, namedq, link))
567                 return false;
568 
569         l = *link;
570         if (peer_id) {
571                 char peer_str[NODE_ID_STR_LEN] = {0,};
572 
573                 tipc_nodeid2string(peer_str, peer_id);
574                 if (strlen(peer_str) > 16)
575                         sprintf(peer_str, "%x", peer);
576                 /* Broadcast receiver link name: "broadcast-link:<peer>" */
577                 snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
578                          peer_str);
579         } else {
580                 strcpy(l->name, tipc_bclink_name);
581         }
582         trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
583         tipc_link_reset(l);
584         l->state = LINK_RESET;
585         l->ackers = 0;
586         l->bc_rcvlink = l;
587 
588         /* Broadcast send link is always up */
589         if (link_is_bc_sndlink(l))
590                 l->state = LINK_ESTABLISHED;
591 
592         /* Disable replicast if even a single peer doesn't support it */
593         if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
594                 tipc_bcast_toggle_rcast(net, false);
595 
596         return true;
597 }
598 
599 /**
600  * tipc_link_fsm_evt - link finite state machine
601  * @l: pointer to link
602  * @evt: state machine event to be processed
603  */
604 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
605 {
606         int rc = 0;
607         int old_state = l->state;
608 
609         switch (l->state) {
610         case LINK_RESETTING:
611                 switch (evt) {
612                 case LINK_PEER_RESET_EVT:
613                         l->state = LINK_PEER_RESET;
614                         break;
615                 case LINK_RESET_EVT:
616                         l->state = LINK_RESET;
617                         break;
618                 case LINK_FAILURE_EVT:
619                 case LINK_FAILOVER_BEGIN_EVT:
620                 case LINK_ESTABLISH_EVT:
621                 case LINK_FAILOVER_END_EVT:
622                 case LINK_SYNCH_BEGIN_EVT:
623                 case LINK_SYNCH_END_EVT:
624                 default:
625                         goto illegal_evt;
626                 }
627                 break;
628         case LINK_RESET:
629                 switch (evt) {
630                 case LINK_PEER_RESET_EVT:
631                         l->state = LINK_ESTABLISHING;
632                         break;
633                 case LINK_FAILOVER_BEGIN_EVT:
634                         l->state = LINK_FAILINGOVER;
635                         break;
636                 case LINK_FAILURE_EVT:
637                 case LINK_RESET_EVT:
638                 case LINK_ESTABLISH_EVT:
639                 case LINK_FAILOVER_END_EVT:
640                         break;
641                 case LINK_SYNCH_BEGIN_EVT:
642                 case LINK_SYNCH_END_EVT:
643                 default:
644                         goto illegal_evt;
645                 }
646                 break;
647         case LINK_PEER_RESET:
648                 switch (evt) {
649                 case LINK_RESET_EVT:
650                         l->state = LINK_ESTABLISHING;
651                         break;
652                 case LINK_PEER_RESET_EVT:
653                 case LINK_ESTABLISH_EVT:
654                 case LINK_FAILURE_EVT:
655                         break;
656                 case LINK_SYNCH_BEGIN_EVT:
657                 case LINK_SYNCH_END_EVT:
658                 case LINK_FAILOVER_BEGIN_EVT:
659                 case LINK_FAILOVER_END_EVT:
660                 default:
661                         goto illegal_evt;
662                 }
663                 break;
664         case LINK_FAILINGOVER:
665                 switch (evt) {
666                 case LINK_FAILOVER_END_EVT:
667                         l->state = LINK_RESET;
668                         break;
669                 case LINK_PEER_RESET_EVT:
670                 case LINK_RESET_EVT:
671                 case LINK_ESTABLISH_EVT:
672                 case LINK_FAILURE_EVT:
673                         break;
674                 case LINK_FAILOVER_BEGIN_EVT:
675                 case LINK_SYNCH_BEGIN_EVT:
676                 case LINK_SYNCH_END_EVT:
677                 default:
678                         goto illegal_evt;
679                 }
680                 break;
681         case LINK_ESTABLISHING:
682                 switch (evt) {
683                 case LINK_ESTABLISH_EVT:
684                         l->state = LINK_ESTABLISHED;
685                         break;
686                 case LINK_FAILOVER_BEGIN_EVT:
687                         l->state = LINK_FAILINGOVER;
688                         break;
689                 case LINK_RESET_EVT:
690                         l->state = LINK_RESET;
691                         break;
692                 case LINK_FAILURE_EVT:
693                 case LINK_PEER_RESET_EVT:
694                 case LINK_SYNCH_BEGIN_EVT:
695                 case LINK_FAILOVER_END_EVT:
696                         break;
697                 case LINK_SYNCH_END_EVT:
698                 default:
699                         goto illegal_evt;
700                 }
701                 break;
702         case LINK_ESTABLISHED:
703                 switch (evt) {
704                 case LINK_PEER_RESET_EVT:
705                         l->state = LINK_PEER_RESET;
706                         rc |= TIPC_LINK_DOWN_EVT;
707                         break;
708                 case LINK_FAILURE_EVT:
709                         l->state = LINK_RESETTING;
710                         rc |= TIPC_LINK_DOWN_EVT;
711                         break;
712                 case LINK_RESET_EVT:
713                         l->state = LINK_RESET;
714                         break;
715                 case LINK_ESTABLISH_EVT:
716                 case LINK_SYNCH_END_EVT:
717                         break;
718                 case LINK_SYNCH_BEGIN_EVT:
719                         l->state = LINK_SYNCHING;
720                         break;
721                 case LINK_FAILOVER_BEGIN_EVT:
722                 case LINK_FAILOVER_END_EVT:
723                 default:
724                         goto illegal_evt;
725                 }
726                 break;
727         case LINK_SYNCHING:
728                 switch (evt) {
729                 case LINK_PEER_RESET_EVT:
730                         l->state = LINK_PEER_RESET;
731                         rc |= TIPC_LINK_DOWN_EVT;
732                         break;
733                 case LINK_FAILURE_EVT:
734                         l->state = LINK_RESETTING;
735                         rc |= TIPC_LINK_DOWN_EVT;
736                         break;
737                 case LINK_RESET_EVT:
738                         l->state = LINK_RESET;
739                         break;
740                 case LINK_ESTABLISH_EVT:
741                 case LINK_SYNCH_BEGIN_EVT:
742                         break;
743                 case LINK_SYNCH_END_EVT:
744                         l->state = LINK_ESTABLISHED;
745                         break;
746                 case LINK_FAILOVER_BEGIN_EVT:
747                 case LINK_FAILOVER_END_EVT:
748                 default:
749                         goto illegal_evt;
750                 }
751                 break;
752         default:
753                 pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
754         }
755         trace_tipc_link_fsm(l->name, old_state, l->state, evt);
756         return rc;
757 illegal_evt:
758         pr_err("Illegal FSM event %x in state %x on link %s\n",
759                evt, l->state, l->name);
760         trace_tipc_link_fsm(l->name, old_state, l->state, evt);
761         return rc;
762 }
763 
764 /* link_profile_stats - update statistical profiling of traffic
765  */
766 static void link_profile_stats(struct tipc_link *l)
767 {
768         struct sk_buff *skb;
769         struct tipc_msg *msg;
770         int length;
771 
772         /* Update counters used in statistical profiling of send traffic */
773         l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
774         l->stats.queue_sz_counts++;
775 
776         skb = skb_peek(&l->transmq);
777         if (!skb)
778                 return;
779         msg = buf_msg(skb);
780         length = msg_size(msg);
781 
782         if (msg_user(msg) == MSG_FRAGMENTER) {
783                 if (msg_type(msg) != FIRST_FRAGMENT)
784                         return;
785                 length = msg_size(msg_inner_hdr(msg));
786         }
787         l->stats.msg_lengths_total += length;
788         l->stats.msg_length_counts++;
789         if (length <= 64)
790                 l->stats.msg_length_profile[0]++;
791         else if (length <= 256)
792                 l->stats.msg_length_profile[1]++;
793         else if (length <= 1024)
794                 l->stats.msg_length_profile[2]++;
795         else if (length <= 4096)
796                 l->stats.msg_length_profile[3]++;
797         else if (length <= 16384)
798                 l->stats.msg_length_profile[4]++;
799         else if (length <= 32768)
800                 l->stats.msg_length_profile[5]++;
801         else
802                 l->stats.msg_length_profile[6]++;
803 }
804 
805 /**
806  * tipc_link_too_silent - check if link is "too silent"
807  * @l: tipc link to be checked
808  *
809  * Return: true if the link 'silent_intv_cnt' is about to reach the
810  * 'abort_limit' value, otherwise false
811  */
812 bool tipc_link_too_silent(struct tipc_link *l)
813 {
814         return (l->silent_intv_cnt + 2 > l->abort_limit);
815 }
816 
817 /* tipc_link_timeout - perform periodic task as instructed from node timeout
818  */
819 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
820 {
821         int mtyp = 0;
822         int rc = 0;
823         bool state = false;
824         bool probe = false;
825         bool setup = false;
826         u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
827         u16 bc_acked = l->bc_rcvlink->acked;
828         struct tipc_mon_state *mstate = &l->mon_state;
829 
830         trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
831         trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
832         switch (l->state) {
833         case LINK_ESTABLISHED:
834         case LINK_SYNCHING:
835                 mtyp = STATE_MSG;
836                 link_profile_stats(l);
837                 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
838                 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
839                         return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
840                 state = bc_acked != bc_snt;
841                 state |= l->bc_rcvlink->rcv_unacked;
842                 state |= l->rcv_unacked;
843                 state |= !skb_queue_empty(&l->transmq);
844                 probe = mstate->probing;
845                 probe |= l->silent_intv_cnt;
846                 if (probe || mstate->monitoring)
847                         l->silent_intv_cnt++;
848                 probe |= !skb_queue_empty(&l->deferdq);
849                 if (l->snd_nxt == l->checkpoint) {
850                         tipc_link_update_cwin(l, 0, 0);
851                         probe = true;
852                 }
853                 l->checkpoint = l->snd_nxt;
854                 break;
855         case LINK_RESET:
856                 setup = l->rst_cnt++ <= 4;
857                 setup |= !(l->rst_cnt % 16);
858                 mtyp = RESET_MSG;
859                 break;
860         case LINK_ESTABLISHING:
861                 setup = true;
862                 mtyp = ACTIVATE_MSG;
863                 break;
864         case LINK_PEER_RESET:
865         case LINK_RESETTING:
866         case LINK_FAILINGOVER:
867                 break;
868         default:
869                 break;
870         }
871 
872         if (state || probe || setup)
873                 tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
874 
875         return rc;
876 }
877 
878 /**
879  * link_schedule_user - schedule a message sender for wakeup after congestion
880  * @l: congested link
881  * @hdr: header of message that is being sent
882  * Create pseudo msg to send back to user when congestion abates
883  */
884 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
885 {
886         u32 dnode = tipc_own_addr(l->net);
887         u32 dport = msg_origport(hdr);
888         struct sk_buff *skb;
889 
890         /* Create and schedule wakeup pseudo message */
891         skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
892                               dnode, l->addr, dport, 0, 0);
893         if (!skb)
894                 return -ENOBUFS;
895         msg_set_dest_droppable(buf_msg(skb), true);
896         TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
897         skb_queue_tail(&l->wakeupq, skb);
898         l->stats.link_congs++;
899         trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
900         return -ELINKCONG;
901 }
902 
903 /**
904  * link_prepare_wakeup - prepare users for wakeup after congestion
905  * @l: congested link
906  * Wake up a number of waiting users, as permitted by available space
907  * in the send queue
908  */
909 static void link_prepare_wakeup(struct tipc_link *l)
910 {
911         struct sk_buff_head *wakeupq = &l->wakeupq;
912         struct sk_buff_head *inputq = l->inputq;
913         struct sk_buff *skb, *tmp;
914         struct sk_buff_head tmpq;
915         int avail[5] = {0,};
916         int imp = 0;
917 
918         __skb_queue_head_init(&tmpq);
919 
920         for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
921                 avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
922 
923         skb_queue_walk_safe(wakeupq, skb, tmp) {
924                 imp = TIPC_SKB_CB(skb)->chain_imp;
925                 if (avail[imp] <= 0)
926                         continue;
927                 avail[imp]--;
928                 __skb_unlink(skb, wakeupq);
929                 __skb_queue_tail(&tmpq, skb);
930         }
931 
932         spin_lock_bh(&inputq->lock);
933         skb_queue_splice_tail(&tmpq, inputq);
934         spin_unlock_bh(&inputq->lock);
935 
936 }
937 
938 /**
939  * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
940  *                                     the given skb should be next attempted
941  * @skb: skb to set a future retransmission time for
942  * @l: link the skb will be transmitted on
943  */
944 static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
945                                               struct tipc_link *l)
946 {
947         if (link_is_bc_sndlink(l))
948                 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
949         else
950                 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
951 }
952 
953 void tipc_link_reset(struct tipc_link *l)
954 {
955         struct sk_buff_head list;
956         u32 imp;
957 
958         __skb_queue_head_init(&list);
959 
960         l->in_session = false;
961         /* Force re-synch of peer session number before establishing */
962         l->peer_session--;
963         l->session++;
964         l->mtu = l->advertised_mtu;
965 
966         spin_lock_bh(&l->wakeupq.lock);
967         skb_queue_splice_init(&l->wakeupq, &list);
968         spin_unlock_bh(&l->wakeupq.lock);
969 
970         spin_lock_bh(&l->inputq->lock);
971         skb_queue_splice_init(&list, l->inputq);
972         spin_unlock_bh(&l->inputq->lock);
973 
974         __skb_queue_purge(&l->transmq);
975         __skb_queue_purge(&l->deferdq);
976         __skb_queue_purge(&l->backlogq);
977         __skb_queue_purge(&l->failover_deferdq);
978         for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
979                 l->backlog[imp].len = 0;
980                 l->backlog[imp].target_bskb = NULL;
981         }
982         kfree_skb(l->reasm_buf);
983         kfree_skb(l->reasm_tnlmsg);
984         kfree_skb(l->failover_reasm_skb);
985         l->reasm_buf = NULL;
986         l->reasm_tnlmsg = NULL;
987         l->failover_reasm_skb = NULL;
988         l->rcv_unacked = 0;
989         l->snd_nxt = 1;
990         l->rcv_nxt = 1;
991         l->snd_nxt_state = 1;
992         l->rcv_nxt_state = 1;
993         l->acked = 0;
994         l->last_gap = 0;
995         kfree(l->last_ga);
996         l->last_ga = NULL;
997         l->silent_intv_cnt = 0;
998         l->rst_cnt = 0;
999         l->bc_peer_is_up = false;
1000         memset(&l->mon_state, 0, sizeof(l->mon_state));
1001         tipc_link_reset_stats(l);
1002 }
1003 
1004 /**
1005  * tipc_link_xmit(): enqueue buffer list according to queue situation
1006  * @l: link to use
1007  * @list: chain of buffers containing message
1008  * @xmitq: returned list of packets to be sent by caller
1009  *
1010  * Consumes the buffer chain.
1011  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
1012  * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
1013  */
1014 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
1015                    struct sk_buff_head *xmitq)
1016 {
1017         struct sk_buff_head *backlogq = &l->backlogq;
1018         struct sk_buff_head *transmq = &l->transmq;
1019         struct sk_buff *skb, *_skb;
1020         u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1021         u16 ack = l->rcv_nxt - 1;
1022         u16 seqno = l->snd_nxt;
1023         int pkt_cnt = skb_queue_len(list);
1024         unsigned int mss = tipc_link_mss(l);
1025         unsigned int cwin = l->window;
1026         unsigned int mtu = l->mtu;
1027         struct tipc_msg *hdr;
1028         bool new_bundle;
1029         int rc = 0;
1030         int imp;
1031 
1032         if (pkt_cnt <= 0)
1033                 return 0;
1034 
1035         hdr = buf_msg(skb_peek(list));
1036         if (unlikely(msg_size(hdr) > mtu)) {
1037                 pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
1038                         skb_queue_len(list), msg_user(hdr),
1039                         msg_type(hdr), msg_size(hdr), mtu);
1040                 __skb_queue_purge(list);
1041                 return -EMSGSIZE;
1042         }
1043 
1044         imp = msg_importance(hdr);
1045         /* Allow oversubscription of one data msg per source at congestion */
1046         if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
1047                 if (imp == TIPC_SYSTEM_IMPORTANCE) {
1048                         pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
1049                         return -ENOBUFS;
1050                 }
1051                 rc = link_schedule_user(l, hdr);
1052         }
1053 
1054         if (pkt_cnt > 1) {
1055                 l->stats.sent_fragmented++;
1056                 l->stats.sent_fragments += pkt_cnt;
1057         }
1058 
1059         /* Prepare each packet for sending, and add to relevant queue: */
1060         while ((skb = __skb_dequeue(list))) {
1061                 if (likely(skb_queue_len(transmq) < cwin)) {
1062                         hdr = buf_msg(skb);
1063                         msg_set_seqno(hdr, seqno);
1064                         msg_set_ack(hdr, ack);
1065                         msg_set_bcast_ack(hdr, bc_ack);
1066                         _skb = skb_clone(skb, GFP_ATOMIC);
1067                         if (!_skb) {
1068                                 kfree_skb(skb);
1069                                 __skb_queue_purge(list);
1070                                 return -ENOBUFS;
1071                         }
1072                         __skb_queue_tail(transmq, skb);
1073                         tipc_link_set_skb_retransmit_time(skb, l);
1074                         __skb_queue_tail(xmitq, _skb);
1075                         TIPC_SKB_CB(skb)->ackers = l->ackers;
1076                         l->rcv_unacked = 0;
1077                         l->stats.sent_pkts++;
1078                         seqno++;
1079                         continue;
1080                 }
1081                 if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
1082                                         mss, l->addr, &new_bundle)) {
1083                         if (skb) {
1084                                 /* Keep a ref. to the skb for next try */
1085                                 l->backlog[imp].target_bskb = skb;
1086                                 l->backlog[imp].len++;
1087                                 __skb_queue_tail(backlogq, skb);
1088                         } else {
1089                                 if (new_bundle) {
1090                                         l->stats.sent_bundles++;
1091                                         l->stats.sent_bundled++;
1092                                 }
1093                                 l->stats.sent_bundled++;
1094                         }
1095                         continue;
1096                 }
1097                 l->backlog[imp].target_bskb = NULL;
1098                 l->backlog[imp].len += (1 + skb_queue_len(list));
1099                 __skb_queue_tail(backlogq, skb);
1100                 skb_queue_splice_tail_init(list, backlogq);
1101         }
1102         l->snd_nxt = seqno;
1103         return rc;
1104 }
1105 
1106 static void tipc_link_update_cwin(struct tipc_link *l, int released,
1107                                   bool retransmitted)
1108 {
1109         int bklog_len = skb_queue_len(&l->backlogq);
1110         struct sk_buff_head *txq = &l->transmq;
1111         int txq_len = skb_queue_len(txq);
1112         u16 cwin = l->window;
1113 
1114         /* Enter fast recovery */
1115         if (unlikely(retransmitted)) {
1116                 l->ssthresh = max_t(u16, l->window / 2, 300);
1117                 l->window = min_t(u16, l->ssthresh, l->window);
1118                 return;
1119         }
1120         /* Enter slow start */
1121         if (unlikely(!released)) {
1122                 l->ssthresh = max_t(u16, l->window / 2, 300);
1123                 l->window = l->min_win;
1124                 return;
1125         }
1126         /* Don't increase window if no pressure on the transmit queue */
1127         if (txq_len + bklog_len < cwin)
1128                 return;
1129 
1130         /* Don't increase window if there are holes the transmit queue */
1131         if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
1132                 return;
1133 
1134         l->cong_acks += released;
1135 
1136         /* Slow start  */
1137         if (cwin <= l->ssthresh) {
1138                 l->window = min_t(u16, cwin + released, l->max_win);
1139                 return;
1140         }
1141         /* Congestion avoidance */
1142         if (l->cong_acks < cwin)
1143                 return;
1144         l->window = min_t(u16, ++cwin, l->max_win);
1145         l->cong_acks = 0;
1146 }
1147 
1148 static void tipc_link_advance_backlog(struct tipc_link *l,
1149                                       struct sk_buff_head *xmitq)
1150 {
1151         u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1152         struct sk_buff_head *txq = &l->transmq;
1153         struct sk_buff *skb, *_skb;
1154         u16 ack = l->rcv_nxt - 1;
1155         u16 seqno = l->snd_nxt;
1156         struct tipc_msg *hdr;
1157         u16 cwin = l->window;
1158         u32 imp;
1159 
1160         while (skb_queue_len(txq) < cwin) {
1161                 skb = skb_peek(&l->backlogq);
1162                 if (!skb)
1163                         break;
1164                 _skb = skb_clone(skb, GFP_ATOMIC);
1165                 if (!_skb)
1166                         break;
1167                 __skb_dequeue(&l->backlogq);
1168                 hdr = buf_msg(skb);
1169                 imp = msg_importance(hdr);
1170                 l->backlog[imp].len--;
1171                 if (unlikely(skb == l->backlog[imp].target_bskb))
1172                         l->backlog[imp].target_bskb = NULL;
1173                 __skb_queue_tail(&l->transmq, skb);
1174                 tipc_link_set_skb_retransmit_time(skb, l);
1175 
1176                 __skb_queue_tail(xmitq, _skb);
1177                 TIPC_SKB_CB(skb)->ackers = l->ackers;
1178                 msg_set_seqno(hdr, seqno);
1179                 msg_set_ack(hdr, ack);
1180                 msg_set_bcast_ack(hdr, bc_ack);
1181                 l->rcv_unacked = 0;
1182                 l->stats.sent_pkts++;
1183                 seqno++;
1184         }
1185         l->snd_nxt = seqno;
1186 }
1187 
1188 /**
1189  * link_retransmit_failure() - Detect repeated retransmit failures
1190  * @l: tipc link sender
1191  * @r: tipc link receiver (= l in case of unicast)
1192  * @rc: returned code
1193  *
1194  * Return: true if the repeated retransmit failures happens, otherwise
1195  * false
1196  */
1197 static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1198                                     int *rc)
1199 {
1200         struct sk_buff *skb = skb_peek(&l->transmq);
1201         struct tipc_msg *hdr;
1202 
1203         if (!skb)
1204                 return false;
1205 
1206         if (!TIPC_SKB_CB(skb)->retr_cnt)
1207                 return false;
1208 
1209         if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1210                         msecs_to_jiffies(r->tolerance * 10)))
1211                 return false;
1212 
1213         hdr = buf_msg(skb);
1214         if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1215                 return false;
1216 
1217         pr_warn("Retransmission failure on link <%s>\n", l->name);
1218         link_print(l, "State of link ");
1219         pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1220                 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1221         pr_info("sqno %u, prev: %x, dest: %x\n",
1222                 msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1223         pr_info("retr_stamp %d, retr_cnt %d\n",
1224                 jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1225                 TIPC_SKB_CB(skb)->retr_cnt);
1226 
1227         trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1228         trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1229         trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1230 
1231         if (link_is_bc_sndlink(l)) {
1232                 r->state = LINK_RESET;
1233                 *rc |= TIPC_LINK_DOWN_EVT;
1234         } else {
1235                 *rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1236         }
1237 
1238         return true;
1239 }
1240 
1241 /* tipc_data_input - deliver data and name distr msgs to upper layer
1242  *
1243  * Consumes buffer if message is of right type
1244  * Node lock must be held
1245  */
1246 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1247                             struct sk_buff_head *inputq)
1248 {
1249         struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1250         struct tipc_msg *hdr = buf_msg(skb);
1251 
1252         switch (msg_user(hdr)) {
1253         case TIPC_LOW_IMPORTANCE:
1254         case TIPC_MEDIUM_IMPORTANCE:
1255         case TIPC_HIGH_IMPORTANCE:
1256         case TIPC_CRITICAL_IMPORTANCE:
1257                 if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1258                         skb_queue_tail(mc_inputq, skb);
1259                         return true;
1260                 }
1261                 fallthrough;
1262         case CONN_MANAGER:
1263                 skb_queue_tail(inputq, skb);
1264                 return true;
1265         case GROUP_PROTOCOL:
1266                 skb_queue_tail(mc_inputq, skb);
1267                 return true;
1268         case NAME_DISTRIBUTOR:
1269                 l->bc_rcvlink->state = LINK_ESTABLISHED;
1270                 skb_queue_tail(l->namedq, skb);
1271                 return true;
1272         case MSG_BUNDLER:
1273         case TUNNEL_PROTOCOL:
1274         case MSG_FRAGMENTER:
1275         case BCAST_PROTOCOL:
1276                 return false;
1277 #ifdef CONFIG_TIPC_CRYPTO
1278         case MSG_CRYPTO:
1279                 if (sysctl_tipc_key_exchange_enabled &&
1280                     TIPC_SKB_CB(skb)->decrypted) {
1281                         tipc_crypto_msg_rcv(l->net, skb);
1282                         return true;
1283                 }
1284                 fallthrough;
1285 #endif
1286         default:
1287                 pr_warn("Dropping received illegal msg type\n");
1288                 kfree_skb(skb);
1289                 return true;
1290         }
1291 }
1292 
1293 /* tipc_link_input - process packet that has passed link protocol check
1294  *
1295  * Consumes buffer
1296  */
1297 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1298                            struct sk_buff_head *inputq,
1299                            struct sk_buff **reasm_skb)
1300 {
1301         struct tipc_msg *hdr = buf_msg(skb);
1302         struct sk_buff *iskb;
1303         struct sk_buff_head tmpq;
1304         int usr = msg_user(hdr);
1305         int pos = 0;
1306 
1307         if (usr == MSG_BUNDLER) {
1308                 skb_queue_head_init(&tmpq);
1309                 l->stats.recv_bundles++;
1310                 l->stats.recv_bundled += msg_msgcnt(hdr);
1311                 while (tipc_msg_extract(skb, &iskb, &pos))
1312                         tipc_data_input(l, iskb, &tmpq);
1313                 tipc_skb_queue_splice_tail(&tmpq, inputq);
1314                 return 0;
1315         } else if (usr == MSG_FRAGMENTER) {
1316                 l->stats.recv_fragments++;
1317                 if (tipc_buf_append(reasm_skb, &skb)) {
1318                         l->stats.recv_fragmented++;
1319                         tipc_data_input(l, skb, inputq);
1320                 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1321                         pr_warn_ratelimited("Unable to build fragment list\n");
1322                         return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1323                 }
1324                 return 0;
1325         } else if (usr == BCAST_PROTOCOL) {
1326                 tipc_bcast_lock(l->net);
1327                 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1328                 tipc_bcast_unlock(l->net);
1329         }
1330 
1331         kfree_skb(skb);
1332         return 0;
1333 }
1334 
1335 /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1336  *                       inner message along with the ones in the old link's
1337  *                       deferdq
1338  * @l: tunnel link
1339  * @skb: TUNNEL_PROTOCOL message
1340  * @inputq: queue to put messages ready for delivery
1341  */
1342 static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1343                              struct sk_buff_head *inputq)
1344 {
1345         struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1346         struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
1347         struct sk_buff_head *fdefq = &l->failover_deferdq;
1348         struct tipc_msg *hdr = buf_msg(skb);
1349         struct sk_buff *iskb;
1350         int ipos = 0;
1351         int rc = 0;
1352         u16 seqno;
1353 
1354         if (msg_type(hdr) == SYNCH_MSG) {
1355                 kfree_skb(skb);
1356                 return 0;
1357         }
1358 
1359         /* Not a fragment? */
1360         if (likely(!msg_nof_fragms(hdr))) {
1361                 if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1362                         pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1363                                             skb_queue_len(fdefq));
1364                         return 0;
1365                 }
1366                 kfree_skb(skb);
1367         } else {
1368                 /* Set fragment type for buf_append */
1369                 if (msg_fragm_no(hdr) == 1)
1370                         msg_set_type(hdr, FIRST_FRAGMENT);
1371                 else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1372                         msg_set_type(hdr, FRAGMENT);
1373                 else
1374                         msg_set_type(hdr, LAST_FRAGMENT);
1375 
1376                 if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1377                         /* Successful but non-complete reassembly? */
1378                         if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1379                                 return 0;
1380                         pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1381                         return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1382                 }
1383                 iskb = skb;
1384         }
1385 
1386         do {
1387                 seqno = buf_seqno(iskb);
1388                 if (unlikely(less(seqno, l->drop_point))) {
1389                         kfree_skb(iskb);
1390                         continue;
1391                 }
1392                 if (unlikely(seqno != l->drop_point)) {
1393                         __tipc_skb_queue_sorted(fdefq, seqno, iskb);
1394                         continue;
1395                 }
1396 
1397                 l->drop_point++;
1398                 if (!tipc_data_input(l, iskb, inputq))
1399                         rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1400                 if (unlikely(rc))
1401                         break;
1402         } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1403 
1404         return rc;
1405 }
1406 
1407 /**
1408  * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
1409  * @ga: returned pointer to the Gap ACK blocks if any
1410  * @l: the tipc link
1411  * @hdr: the PROTOCOL/STATE_MSG header
1412  * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
1413  *
1414  * Return: the total Gap ACK blocks size
1415  */
1416 u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
1417                           struct tipc_msg *hdr, bool uc)
1418 {
1419         struct tipc_gap_ack_blks *p;
1420         u16 sz = 0;
1421 
1422         /* Does peer support the Gap ACK blocks feature? */
1423         if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
1424                 p = (struct tipc_gap_ack_blks *)msg_data(hdr);
1425                 sz = ntohs(p->len);
1426                 /* Sanity check */
1427                 if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
1428                         /* Good, check if the desired type exists */
1429                         if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
1430                                 goto ok;
1431                 /* Backward compatible: peer might not support bc, but uc? */
1432                 } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
1433                         if (p->ugack_cnt) {
1434                                 p->bgack_cnt = 0;
1435                                 goto ok;
1436                         }
1437                 }
1438         }
1439         /* Other cases: ignore! */
1440         p = NULL;
1441 
1442 ok:
1443         *ga = p;
1444         return sz;
1445 }
1446 
1447 static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
1448                                     struct tipc_link *l, u8 start_index)
1449 {
1450         struct tipc_gap_ack *gacks = &ga->gacks[start_index];
1451         struct sk_buff *skb = skb_peek(&l->deferdq);
1452         u16 expect, seqno = 0;
1453         u8 n = 0;
1454 
1455         if (!skb)
1456                 return 0;
1457 
1458         expect = buf_seqno(skb);
1459         skb_queue_walk(&l->deferdq, skb) {
1460                 seqno = buf_seqno(skb);
1461                 if (unlikely(more(seqno, expect))) {
1462                         gacks[n].ack = htons(expect - 1);
1463                         gacks[n].gap = htons(seqno - expect);
1464                         if (++n >= MAX_GAP_ACK_BLKS / 2) {
1465                                 pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
1466                                                     l->name, n,
1467                                                     skb_queue_len(&l->deferdq));
1468                                 return n;
1469                         }
1470                 } else if (unlikely(less(seqno, expect))) {
1471                         pr_warn("Unexpected skb in deferdq!\n");
1472                         continue;
1473                 }
1474                 expect = seqno + 1;
1475         }
1476 
1477         /* last block */
1478         gacks[n].ack = htons(seqno);
1479         gacks[n].gap = 0;
1480         n++;
1481         return n;
1482 }
1483 
1484 /* tipc_build_gap_ack_blks - build Gap ACK blocks
1485  * @l: tipc unicast link
1486  * @hdr: the tipc message buffer to store the Gap ACK blocks after built
1487  *
1488  * The function builds Gap ACK blocks for both the unicast & broadcast receiver
1489  * links of a certain peer, the buffer after built has the network data format
1490  * as found at the struct tipc_gap_ack_blks definition.
1491  *
1492  * returns the actual allocated memory size
1493  */
1494 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
1495 {
1496         struct tipc_link *bcl = l->bc_rcvlink;
1497         struct tipc_gap_ack_blks *ga;
1498         u16 len;
1499 
1500         ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
1501 
1502         /* Start with broadcast link first */
1503         tipc_bcast_lock(bcl->net);
1504         msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1505         msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1506         ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
1507         tipc_bcast_unlock(bcl->net);
1508 
1509         /* Now for unicast link, but an explicit NACK only (???) */
1510         ga->ugack_cnt = (msg_seq_gap(hdr)) ?
1511                         __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
1512 
1513         /* Total len */
1514         len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
1515         ga->len = htons(len);
1516         return len;
1517 }
1518 
1519 /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1520  *                             acked packets, also doing retransmissions if
1521  *                             gaps found
1522  * @l: tipc link with transmq queue to be advanced
1523  * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
1524  * @acked: seqno of last packet acked by peer without any gaps before
1525  * @gap: # of gap packets
1526  * @ga: buffer pointer to Gap ACK blocks from peer
1527  * @xmitq: queue for accumulating the retransmitted packets if any
1528  * @retransmitted: returned boolean value if a retransmission is really issued
1529  * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if a repeated retransmit failures
1530  *      happens (- unlikely case)
1531  *
1532  * Return: the number of packets released from the link transmq
1533  */
1534 static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
1535                                      u16 acked, u16 gap,
1536                                      struct tipc_gap_ack_blks *ga,
1537                                      struct sk_buff_head *xmitq,
1538                                      bool *retransmitted, int *rc)
1539 {
1540         struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
1541         struct tipc_gap_ack *gacks = NULL;
1542         struct sk_buff *skb, *_skb, *tmp;
1543         struct tipc_msg *hdr;
1544         u32 qlen = skb_queue_len(&l->transmq);
1545         u16 nacked = acked, ngap = gap, gack_cnt = 0;
1546         u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1547         u16 ack = l->rcv_nxt - 1;
1548         u16 seqno, n = 0;
1549         u16 end = r->acked, start = end, offset = r->last_gap;
1550         u16 si = (last_ga) ? last_ga->start_index : 0;
1551         bool is_uc = !link_is_bc_sndlink(l);
1552         bool bc_has_acked = false;
1553 
1554         trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
1555 
1556         /* Determine Gap ACK blocks if any for the particular link */
1557         if (ga && is_uc) {
1558                 /* Get the Gap ACKs, uc part */
1559                 gack_cnt = ga->ugack_cnt;
1560                 gacks = &ga->gacks[ga->bgack_cnt];
1561         } else if (ga) {
1562                 /* Copy the Gap ACKs, bc part, for later renewal if needed */
1563                 this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
1564                                   GFP_ATOMIC);
1565                 if (likely(this_ga)) {
1566                         this_ga->start_index = 0;
1567                         /* Start with the bc Gap ACKs */
1568                         gack_cnt = this_ga->bgack_cnt;
1569                         gacks = &this_ga->gacks[0];
1570                 } else {
1571                         /* Hmm, we can get in trouble..., simply ignore it */
1572                         pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
1573                 }
1574         }
1575 
1576         /* Advance the link transmq */
1577         skb_queue_walk_safe(&l->transmq, skb, tmp) {
1578                 seqno = buf_seqno(skb);
1579 
1580 next_gap_ack:
1581                 if (less_eq(seqno, nacked)) {
1582                         if (is_uc)
1583                                 goto release;
1584                         /* Skip packets peer has already acked */
1585                         if (!more(seqno, r->acked))
1586                                 continue;
1587                         /* Get the next of last Gap ACK blocks */
1588                         while (more(seqno, end)) {
1589                                 if (!last_ga || si >= last_ga->bgack_cnt)
1590                                         break;
1591                                 start = end + offset + 1;
1592                                 end = ntohs(last_ga->gacks[si].ack);
1593                                 offset = ntohs(last_ga->gacks[si].gap);
1594                                 si++;
1595                                 WARN_ONCE(more(start, end) ||
1596                                           (!offset &&
1597                                            si < last_ga->bgack_cnt) ||
1598                                           si > MAX_GAP_ACK_BLKS,
1599                                           "Corrupted Gap ACK: %d %d %d %d %d\n",
1600                                           start, end, offset, si,
1601                                           last_ga->bgack_cnt);
1602                         }
1603                         /* Check against the last Gap ACK block */
1604                         if (tipc_in_range(seqno, start, end))
1605                                 continue;
1606                         /* Update/release the packet peer is acking */
1607                         bc_has_acked = true;
1608                         if (--TIPC_SKB_CB(skb)->ackers)
1609                                 continue;
1610 release:
1611                         /* release skb */
1612                         __skb_unlink(skb, &l->transmq);
1613                         kfree_skb(skb);
1614                 } else if (less_eq(seqno, nacked + ngap)) {
1615                         /* First gap: check if repeated retrans failures? */
1616                         if (unlikely(seqno == acked + 1 &&
1617                                      link_retransmit_failure(l, r, rc))) {
1618                                 /* Ignore this bc Gap ACKs if any */
1619                                 kfree(this_ga);
1620                                 this_ga = NULL;
1621                                 break;
1622                         }
1623                         /* retransmit skb if unrestricted*/
1624                         if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1625                                 continue;
1626                         tipc_link_set_skb_retransmit_time(skb, l);
1627                         _skb = pskb_copy(skb, GFP_ATOMIC);
1628                         if (!_skb)
1629                                 continue;
1630                         hdr = buf_msg(_skb);
1631                         msg_set_ack(hdr, ack);
1632                         msg_set_bcast_ack(hdr, bc_ack);
1633                         _skb->priority = TC_PRIO_CONTROL;
1634                         __skb_queue_tail(xmitq, _skb);
1635                         l->stats.retransmitted++;
1636                         if (!is_uc)
1637                                 r->stats.retransmitted++;
1638                         *retransmitted = true;
1639                         /* Increase actual retrans counter & mark first time */
1640                         if (!TIPC_SKB_CB(skb)->retr_cnt++)
1641                                 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1642                 } else {
1643                         /* retry with Gap ACK blocks if any */
1644                         if (n >= gack_cnt)
1645                                 break;
1646                         nacked = ntohs(gacks[n].ack);
1647                         ngap = ntohs(gacks[n].gap);
1648                         n++;
1649                         goto next_gap_ack;
1650                 }
1651         }
1652 
1653         /* Renew last Gap ACK blocks for bc if needed */
1654         if (bc_has_acked) {
1655                 if (this_ga) {
1656                         kfree(last_ga);
1657                         r->last_ga = this_ga;
1658                         r->last_gap = gap;
1659                 } else if (last_ga) {
1660                         if (less(acked, start)) {
1661                                 si--;
1662                                 offset = start - acked - 1;
1663                         } else if (less(acked, end)) {
1664                                 acked = end;
1665                         }
1666                         if (si < last_ga->bgack_cnt) {
1667                                 last_ga->start_index = si;
1668                                 r->last_gap = offset;
1669                         } else {
1670                                 kfree(last_ga);
1671                                 r->last_ga = NULL;
1672                                 r->last_gap = 0;
1673                         }
1674                 } else {
1675                         r->last_gap = 0;
1676                 }
1677                 r->acked = acked;
1678         } else {
1679                 kfree(this_ga);
1680         }
1681 
1682         return qlen - skb_queue_len(&l->transmq);
1683 }
1684 
1685 /* tipc_link_build_state_msg: prepare link state message for transmission
1686  *
1687  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1688  * risk of ack storms towards the sender
1689  */
1690 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1691 {
1692         if (!l)
1693                 return 0;
1694 
1695         /* Broadcast ACK must be sent via a unicast link => defer to caller */
1696         if (link_is_bc_rcvlink(l)) {
1697                 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1698                         return 0;
1699                 l->rcv_unacked = 0;
1700 
1701                 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1702                 l->snd_nxt = l->rcv_nxt;
1703                 return TIPC_LINK_SND_STATE;
1704         }
1705         /* Unicast ACK */
1706         l->rcv_unacked = 0;
1707         l->stats.sent_acks++;
1708         tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1709         return 0;
1710 }
1711 
1712 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1713  */
1714 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1715 {
1716         int mtyp = RESET_MSG;
1717         struct sk_buff *skb;
1718 
1719         if (l->state == LINK_ESTABLISHING)
1720                 mtyp = ACTIVATE_MSG;
1721 
1722         tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1723 
1724         /* Inform peer that this endpoint is going down if applicable */
1725         skb = skb_peek_tail(xmitq);
1726         if (skb && (l->state == LINK_RESET))
1727                 msg_set_peer_stopping(buf_msg(skb), 1);
1728 }
1729 
1730 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1731  * Note that sending of broadcast NACK is coordinated among nodes, to
1732  * reduce the risk of NACK storms towards the sender
1733  */
1734 static int tipc_link_build_nack_msg(struct tipc_link *l,
1735                                     struct sk_buff_head *xmitq)
1736 {
1737         u32 def_cnt = ++l->stats.deferred_recv;
1738         struct sk_buff_head *dfq = &l->deferdq;
1739         u32 defq_len = skb_queue_len(dfq);
1740         int match1, match2;
1741 
1742         if (link_is_bc_rcvlink(l)) {
1743                 match1 = def_cnt & 0xf;
1744                 match2 = tipc_own_addr(l->net) & 0xf;
1745                 if (match1 == match2)
1746                         return TIPC_LINK_SND_STATE;
1747                 return 0;
1748         }
1749 
1750         if (defq_len >= 3 && !((defq_len - 3) % 16)) {
1751                 u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1752 
1753                 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
1754                                           rcvgap, 0, 0, xmitq);
1755         }
1756         return 0;
1757 }
1758 
1759 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1760  * @l: the link that should handle the message
1761  * @skb: TIPC packet
1762  * @xmitq: queue to place packets to be sent after this call
1763  */
1764 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1765                   struct sk_buff_head *xmitq)
1766 {
1767         struct sk_buff_head *defq = &l->deferdq;
1768         struct tipc_msg *hdr = buf_msg(skb);
1769         u16 seqno, rcv_nxt, win_lim;
1770         int released = 0;
1771         int rc = 0;
1772 
1773         /* Verify and update link state */
1774         if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1775                 return tipc_link_proto_rcv(l, skb, xmitq);
1776 
1777         /* Don't send probe at next timeout expiration */
1778         l->silent_intv_cnt = 0;
1779 
1780         do {
1781                 hdr = buf_msg(skb);
1782                 seqno = msg_seqno(hdr);
1783                 rcv_nxt = l->rcv_nxt;
1784                 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1785 
1786                 if (unlikely(!tipc_link_is_up(l))) {
1787                         if (l->state == LINK_ESTABLISHING)
1788                                 rc = TIPC_LINK_UP_EVT;
1789                         kfree_skb(skb);
1790                         break;
1791                 }
1792 
1793                 /* Drop if outside receive window */
1794                 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1795                         l->stats.duplicates++;
1796                         kfree_skb(skb);
1797                         break;
1798                 }
1799                 released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
1800                                                       NULL, NULL, NULL, NULL);
1801 
1802                 /* Defer delivery if sequence gap */
1803                 if (unlikely(seqno != rcv_nxt)) {
1804                         if (!__tipc_skb_queue_sorted(defq, seqno, skb))
1805                                 l->stats.duplicates++;
1806                         rc |= tipc_link_build_nack_msg(l, xmitq);
1807                         break;
1808                 }
1809 
1810                 /* Deliver packet */
1811                 l->rcv_nxt++;
1812                 l->stats.recv_pkts++;
1813 
1814                 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1815                         rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1816                 else if (!tipc_data_input(l, skb, l->inputq))
1817                         rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1818                 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1819                         rc |= tipc_link_build_state_msg(l, xmitq);
1820                 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1821                         break;
1822         } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1823 
1824         /* Forward queues and wake up waiting users */
1825         if (released) {
1826                 tipc_link_update_cwin(l, released, 0);
1827                 tipc_link_advance_backlog(l, xmitq);
1828                 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1829                         link_prepare_wakeup(l);
1830         }
1831         return rc;
1832 }
1833 
1834 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1835                                       bool probe_reply, u16 rcvgap,
1836                                       int tolerance, int priority,
1837                                       struct sk_buff_head *xmitq)
1838 {
1839         struct tipc_mon_state *mstate = &l->mon_state;
1840         struct sk_buff_head *dfq = &l->deferdq;
1841         struct tipc_link *bcl = l->bc_rcvlink;
1842         struct tipc_msg *hdr;
1843         struct sk_buff *skb;
1844         bool node_up = tipc_link_is_up(bcl);
1845         u16 glen = 0, bc_rcvgap = 0;
1846         int dlen = 0;
1847         void *data;
1848 
1849         /* Don't send protocol message during reset or link failover */
1850         if (tipc_link_is_blocked(l))
1851                 return;
1852 
1853         if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1854                 return;
1855 
1856         if ((probe || probe_reply) && !skb_queue_empty(dfq))
1857                 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1858 
1859         skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1860                               tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1861                               l->addr, tipc_own_addr(l->net), 0, 0, 0);
1862         if (!skb)
1863                 return;
1864 
1865         hdr = buf_msg(skb);
1866         data = msg_data(hdr);
1867         msg_set_session(hdr, l->session);
1868         msg_set_bearer_id(hdr, l->bearer_id);
1869         msg_set_net_plane(hdr, l->net_plane);
1870         msg_set_next_sent(hdr, l->snd_nxt);
1871         msg_set_ack(hdr, l->rcv_nxt - 1);
1872         msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1873         msg_set_bc_ack_invalid(hdr, !node_up);
1874         msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1875         msg_set_link_tolerance(hdr, tolerance);
1876         msg_set_linkprio(hdr, priority);
1877         msg_set_redundant_link(hdr, node_up);
1878         msg_set_seq_gap(hdr, 0);
1879         msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1880 
1881         if (mtyp == STATE_MSG) {
1882                 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1883                         msg_set_seqno(hdr, l->snd_nxt_state++);
1884                 msg_set_seq_gap(hdr, rcvgap);
1885                 bc_rcvgap = link_bc_rcv_gap(bcl);
1886                 msg_set_bc_gap(hdr, bc_rcvgap);
1887                 msg_set_probe(hdr, probe);
1888                 msg_set_is_keepalive(hdr, probe || probe_reply);
1889                 if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1890                         glen = tipc_build_gap_ack_blks(l, hdr);
1891                 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1892                 msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1893                 skb_trim(skb, INT_H_SIZE + glen + dlen);
1894                 l->stats.sent_states++;
1895                 l->rcv_unacked = 0;
1896         } else {
1897                 /* RESET_MSG or ACTIVATE_MSG */
1898                 if (mtyp == ACTIVATE_MSG) {
1899                         msg_set_dest_session_valid(hdr, 1);
1900                         msg_set_dest_session(hdr, l->peer_session);
1901                 }
1902                 msg_set_max_pkt(hdr, l->advertised_mtu);
1903                 strcpy(data, l->if_name);
1904                 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1905                 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1906         }
1907         if (probe)
1908                 l->stats.sent_probes++;
1909         if (rcvgap)
1910                 l->stats.sent_nacks++;
1911         if (bc_rcvgap)
1912                 bcl->stats.sent_nacks++;
1913         skb->priority = TC_PRIO_CONTROL;
1914         __skb_queue_tail(xmitq, skb);
1915         trace_tipc_proto_build(skb, false, l->name);
1916 }
1917 
1918 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1919                                     struct sk_buff_head *xmitq)
1920 {
1921         u32 onode = tipc_own_addr(l->net);
1922         struct tipc_msg *hdr, *ihdr;
1923         struct sk_buff_head tnlq;
1924         struct sk_buff *skb;
1925         u32 dnode = l->addr;
1926 
1927         __skb_queue_head_init(&tnlq);
1928         skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1929                               INT_H_SIZE, BASIC_H_SIZE,
1930                               dnode, onode, 0, 0, 0);
1931         if (!skb) {
1932                 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1933                 return;
1934         }
1935 
1936         hdr = buf_msg(skb);
1937         msg_set_msgcnt(hdr, 1);
1938         msg_set_bearer_id(hdr, l->peer_bearer_id);
1939 
1940         ihdr = (struct tipc_msg *)msg_data(hdr);
1941         tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1942                       BASIC_H_SIZE, dnode);
1943         msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1944         __skb_queue_tail(&tnlq, skb);
1945         tipc_link_xmit(l, &tnlq, xmitq);
1946 }
1947 
1948 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1949  * with contents of the link's transmit and backlog queues.
1950  */
1951 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1952                            int mtyp, struct sk_buff_head *xmitq)
1953 {
1954         struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1955         struct sk_buff *skb, *tnlskb;
1956         struct tipc_msg *hdr, tnlhdr;
1957         struct sk_buff_head *queue = &l->transmq;
1958         struct sk_buff_head tmpxq, tnlq, frags;
1959         u16 pktlen, pktcnt, seqno = l->snd_nxt;
1960         bool pktcnt_need_update = false;
1961         u16 syncpt;
1962         int rc;
1963 
1964         if (!tnl)
1965                 return;
1966 
1967         __skb_queue_head_init(&tnlq);
1968         /* Link Synching:
1969          * From now on, send only one single ("dummy") SYNCH message
1970          * to peer. The SYNCH message does not contain any data, just
1971          * a header conveying the synch point to the peer.
1972          */
1973         if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1974                 tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1975                                          INT_H_SIZE, 0, l->addr,
1976                                          tipc_own_addr(l->net),
1977                                          0, 0, 0);
1978                 if (!tnlskb) {
1979                         pr_warn("%sunable to create dummy SYNCH_MSG\n",
1980                                 link_co_err);
1981                         return;
1982                 }
1983 
1984                 hdr = buf_msg(tnlskb);
1985                 syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1986                 msg_set_syncpt(hdr, syncpt);
1987                 msg_set_bearer_id(hdr, l->peer_bearer_id);
1988                 __skb_queue_tail(&tnlq, tnlskb);
1989                 tipc_link_xmit(tnl, &tnlq, xmitq);
1990                 return;
1991         }
1992 
1993         __skb_queue_head_init(&tmpxq);
1994         __skb_queue_head_init(&frags);
1995         /* At least one packet required for safe algorithm => add dummy */
1996         skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1997                               BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1998                               0, 0, TIPC_ERR_NO_PORT);
1999         if (!skb) {
2000                 pr_warn("%sunable to create tunnel packet\n", link_co_err);
2001                 return;
2002         }
2003         __skb_queue_tail(&tnlq, skb);
2004         tipc_link_xmit(l, &tnlq, &tmpxq);
2005         __skb_queue_purge(&tmpxq);
2006 
2007         /* Initialize reusable tunnel packet header */
2008         tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
2009                       mtyp, INT_H_SIZE, l->addr);
2010         if (mtyp == SYNCH_MSG)
2011                 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
2012         else
2013                 pktcnt = skb_queue_len(&l->transmq);
2014         pktcnt += skb_queue_len(&l->backlogq);
2015         msg_set_msgcnt(&tnlhdr, pktcnt);
2016         msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
2017 tnl:
2018         /* Wrap each packet into a tunnel packet */
2019         skb_queue_walk(queue, skb) {
2020                 hdr = buf_msg(skb);
2021                 if (queue == &l->backlogq)
2022                         msg_set_seqno(hdr, seqno++);
2023                 pktlen = msg_size(hdr);
2024 
2025                 /* Tunnel link MTU is not large enough? This could be
2026                  * due to:
2027                  * 1) Link MTU has just changed or set differently;
2028                  * 2) Or FAILOVER on the top of a SYNCH message
2029                  *
2030                  * The 2nd case should not happen if peer supports
2031                  * TIPC_TUNNEL_ENHANCED
2032                  */
2033                 if (pktlen > tnl->mtu - INT_H_SIZE) {
2034                         if (mtyp == FAILOVER_MSG &&
2035                             (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
2036                                 rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
2037                                                        &frags);
2038                                 if (rc) {
2039                                         pr_warn("%sunable to frag msg: rc %d\n",
2040                                                 link_co_err, rc);
2041                                         return;
2042                                 }
2043                                 pktcnt += skb_queue_len(&frags) - 1;
2044                                 pktcnt_need_update = true;
2045                                 skb_queue_splice_tail_init(&frags, &tnlq);
2046                                 continue;
2047                         }
2048                         /* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
2049                          * => Just warn it and return!
2050                          */
2051                         pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
2052                                             link_co_err, msg_user(hdr),
2053                                             msg_type(hdr), msg_size(hdr));
2054                         return;
2055                 }
2056 
2057                 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
2058                 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
2059                 if (!tnlskb) {
2060                         pr_warn("%sunable to send packet\n", link_co_err);
2061                         return;
2062                 }
2063                 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
2064                 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
2065                 __skb_queue_tail(&tnlq, tnlskb);
2066         }
2067         if (queue != &l->backlogq) {
2068                 queue = &l->backlogq;
2069                 goto tnl;
2070         }
2071 
2072         if (pktcnt_need_update)
2073                 skb_queue_walk(&tnlq, skb) {
2074                         hdr = buf_msg(skb);
2075                         msg_set_msgcnt(hdr, pktcnt);
2076                 }
2077 
2078         tipc_link_xmit(tnl, &tnlq, xmitq);
2079 
2080         if (mtyp == FAILOVER_MSG) {
2081                 tnl->drop_point = l->rcv_nxt;
2082                 tnl->failover_reasm_skb = l->reasm_buf;
2083                 l->reasm_buf = NULL;
2084 
2085                 /* Failover the link's deferdq */
2086                 if (unlikely(!skb_queue_empty(fdefq))) {
2087                         pr_warn("Link failover deferdq not empty: %d!\n",
2088                                 skb_queue_len(fdefq));
2089                         __skb_queue_purge(fdefq);
2090                 }
2091                 skb_queue_splice_init(&l->deferdq, fdefq);
2092         }
2093 }
2094 
2095 /**
2096  * tipc_link_failover_prepare() - prepare tnl for link failover
2097  *
2098  * This is a special version of the precursor - tipc_link_tnl_prepare(),
2099  * see the tipc_node_link_failover() for details
2100  *
2101  * @l: failover link
2102  * @tnl: tunnel link
2103  * @xmitq: queue for messages to be xmited
2104  */
2105 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
2106                                 struct sk_buff_head *xmitq)
2107 {
2108         struct sk_buff_head *fdefq = &tnl->failover_deferdq;
2109 
2110         tipc_link_create_dummy_tnl_msg(tnl, xmitq);
2111 
2112         /* This failover link endpoint was never established before,
2113          * so it has not received anything from peer.
2114          * Otherwise, it must be a normal failover situation or the
2115          * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
2116          * would have to start over from scratch instead.
2117          */
2118         tnl->drop_point = 1;
2119         tnl->failover_reasm_skb = NULL;
2120 
2121         /* Initiate the link's failover deferdq */
2122         if (unlikely(!skb_queue_empty(fdefq))) {
2123                 pr_warn("Link failover deferdq not empty: %d!\n",
2124                         skb_queue_len(fdefq));
2125                 __skb_queue_purge(fdefq);
2126         }
2127 }
2128 
2129 /* tipc_link_validate_msg(): validate message against current link state
2130  * Returns true if message should be accepted, otherwise false
2131  */
2132 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
2133 {
2134         u16 curr_session = l->peer_session;
2135         u16 session = msg_session(hdr);
2136         int mtyp = msg_type(hdr);
2137 
2138         if (msg_user(hdr) != LINK_PROTOCOL)
2139                 return true;
2140 
2141         switch (mtyp) {
2142         case RESET_MSG:
2143                 if (!l->in_session)
2144                         return true;
2145                 /* Accept only RESET with new session number */
2146                 return more(session, curr_session);
2147         case ACTIVATE_MSG:
2148                 if (!l->in_session)
2149                         return true;
2150                 /* Accept only ACTIVATE with new or current session number */
2151                 return !less(session, curr_session);
2152         case STATE_MSG:
2153                 /* Accept only STATE with current session number */
2154                 if (!l->in_session)
2155                         return false;
2156                 if (session != curr_session)
2157                         return false;
2158                 /* Extra sanity check */
2159                 if (!tipc_link_is_up(l) && msg_ack(hdr))
2160                         return false;
2161                 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
2162                         return true;
2163                 /* Accept only STATE with new sequence number */
2164                 return !less(msg_seqno(hdr), l->rcv_nxt_state);
2165         default:
2166                 return false;
2167         }
2168 }
2169 
2170 /* tipc_link_proto_rcv(): receive link level protocol message :
2171  * Note that network plane id propagates through the network, and may
2172  * change at any time. The node with lowest numerical id determines
2173  * network plane
2174  */
2175 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
2176                                struct sk_buff_head *xmitq)
2177 {
2178         struct tipc_msg *hdr = buf_msg(skb);
2179         struct tipc_gap_ack_blks *ga = NULL;
2180         bool reply = msg_probe(hdr), retransmitted = false;
2181         u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
2182         u16 peers_snd_nxt =  msg_next_sent(hdr);
2183         u16 peers_tol = msg_link_tolerance(hdr);
2184         u16 peers_prio = msg_linkprio(hdr);
2185         u16 gap = msg_seq_gap(hdr);
2186         u16 ack = msg_ack(hdr);
2187         u16 rcv_nxt = l->rcv_nxt;
2188         u16 rcvgap = 0;
2189         int mtyp = msg_type(hdr);
2190         int rc = 0, released;
2191         char *if_name;
2192         void *data;
2193 
2194         trace_tipc_proto_rcv(skb, false, l->name);
2195 
2196         if (dlen > U16_MAX)
2197                 goto exit;
2198 
2199         if (tipc_link_is_blocked(l) || !xmitq)
2200                 goto exit;
2201 
2202         if (tipc_own_addr(l->net) > msg_prevnode(hdr))
2203                 l->net_plane = msg_net_plane(hdr);
2204 
2205         if (skb_linearize(skb))
2206                 goto exit;
2207 
2208         hdr = buf_msg(skb);
2209         data = msg_data(hdr);
2210 
2211         if (!tipc_link_validate_msg(l, hdr)) {
2212                 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
2213                 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
2214                 goto exit;
2215         }
2216 
2217         switch (mtyp) {
2218         case RESET_MSG:
2219         case ACTIVATE_MSG:
2220                 msg_max = msg_max_pkt(hdr);
2221                 if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
2222                         break;
2223                 /* Complete own link name with peer's interface name */
2224                 if_name =  strrchr(l->name, ':') + 1;
2225                 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
2226                         break;
2227                 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
2228                         break;
2229                 strncpy(if_name, data, TIPC_MAX_IF_NAME);
2230 
2231                 /* Update own tolerance if peer indicates a non-zero value */
2232                 if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2233                         l->tolerance = peers_tol;
2234                         l->bc_rcvlink->tolerance = peers_tol;
2235                 }
2236                 /* Update own priority if peer's priority is higher */
2237                 if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2238                         l->priority = peers_prio;
2239 
2240                 /* If peer is going down we want full re-establish cycle */
2241                 if (msg_peer_stopping(hdr)) {
2242                         rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2243                         break;
2244                 }
2245 
2246                 /* If this endpoint was re-created while peer was ESTABLISHING
2247                  * it doesn't know current session number. Force re-synch.
2248                  */
2249                 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2250                     l->session != msg_dest_session(hdr)) {
2251                         if (less(l->session, msg_dest_session(hdr)))
2252                                 l->session = msg_dest_session(hdr) + 1;
2253                         break;
2254                 }
2255 
2256                 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2257                 if (mtyp == RESET_MSG || !tipc_link_is_up(l))
2258                         rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2259 
2260                 /* ACTIVATE_MSG takes up link if it was already locally reset */
2261                 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
2262                         rc = TIPC_LINK_UP_EVT;
2263 
2264                 l->peer_session = msg_session(hdr);
2265                 l->in_session = true;
2266                 l->peer_bearer_id = msg_bearer_id(hdr);
2267                 if (l->mtu > msg_max)
2268                         l->mtu = msg_max;
2269                 break;
2270 
2271         case STATE_MSG:
2272                 /* Validate Gap ACK blocks, drop if invalid */
2273                 glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
2274                 if (glen > dlen)
2275                         break;
2276 
2277                 l->rcv_nxt_state = msg_seqno(hdr) + 1;
2278 
2279                 /* Update own tolerance if peer indicates a non-zero value */
2280                 if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2281                         l->tolerance = peers_tol;
2282                         l->bc_rcvlink->tolerance = peers_tol;
2283                 }
2284                 /* Update own prio if peer indicates a different value */
2285                 if ((peers_prio != l->priority) &&
2286                     tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2287                         l->priority = peers_prio;
2288                         rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2289                 }
2290 
2291                 l->silent_intv_cnt = 0;
2292                 l->stats.recv_states++;
2293                 if (msg_probe(hdr))
2294                         l->stats.recv_probes++;
2295 
2296                 if (!tipc_link_is_up(l)) {
2297                         if (l->state == LINK_ESTABLISHING)
2298                                 rc = TIPC_LINK_UP_EVT;
2299                         break;
2300                 }
2301 
2302                 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2303                              &l->mon_state, l->bearer_id);
2304 
2305                 /* Send NACK if peer has sent pkts we haven't received yet */
2306                 if ((reply || msg_is_keepalive(hdr)) &&
2307                     more(peers_snd_nxt, rcv_nxt) &&
2308                     !tipc_link_is_synching(l) &&
2309                     skb_queue_empty(&l->deferdq))
2310                         rcvgap = peers_snd_nxt - l->rcv_nxt;
2311                 if (rcvgap || reply)
2312                         tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2313                                                   rcvgap, 0, 0, xmitq);
2314 
2315                 released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
2316                                                      &retransmitted, &rc);
2317                 if (gap)
2318                         l->stats.recv_nacks++;
2319                 if (released || retransmitted)
2320                         tipc_link_update_cwin(l, released, retransmitted);
2321                 if (released)
2322                         tipc_link_advance_backlog(l, xmitq);
2323                 if (unlikely(!skb_queue_empty(&l->wakeupq)))
2324                         link_prepare_wakeup(l);
2325         }
2326 exit:
2327         kfree_skb(skb);
2328         return rc;
2329 }
2330 
2331 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2332  */
2333 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2334                                          u16 peers_snd_nxt,
2335                                          struct sk_buff_head *xmitq)
2336 {
2337         struct sk_buff *skb;
2338         struct tipc_msg *hdr;
2339         struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2340         u16 ack = l->rcv_nxt - 1;
2341         u16 gap_to = peers_snd_nxt - 1;
2342 
2343         skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2344                               0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2345         if (!skb)
2346                 return false;
2347         hdr = buf_msg(skb);
2348         msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2349         msg_set_bcast_ack(hdr, ack);
2350         msg_set_bcgap_after(hdr, ack);
2351         if (dfrd_skb)
2352                 gap_to = buf_seqno(dfrd_skb) - 1;
2353         msg_set_bcgap_to(hdr, gap_to);
2354         msg_set_non_seq(hdr, bcast);
2355         __skb_queue_tail(xmitq, skb);
2356         return true;
2357 }
2358 
2359 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2360  *
2361  * Give a newly added peer node the sequence number where it should
2362  * start receiving and acking broadcast packets.
2363  */
2364 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2365                                         struct sk_buff_head *xmitq)
2366 {
2367         struct sk_buff_head list;
2368 
2369         __skb_queue_head_init(&list);
2370         if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2371                 return;
2372         msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2373         tipc_link_xmit(l, &list, xmitq);
2374 }
2375 
2376 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2377  */
2378 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2379 {
2380         int mtyp = msg_type(hdr);
2381         u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2382 
2383         if (tipc_link_is_up(l))
2384                 return;
2385 
2386         if (msg_user(hdr) == BCAST_PROTOCOL) {
2387                 l->rcv_nxt = peers_snd_nxt;
2388                 l->state = LINK_ESTABLISHED;
2389                 return;
2390         }
2391 
2392         if (l->peer_caps & TIPC_BCAST_SYNCH)
2393                 return;
2394 
2395         if (msg_peer_node_is_up(hdr))
2396                 return;
2397 
2398         /* Compatibility: accept older, less safe initial synch data */
2399         if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2400                 l->rcv_nxt = peers_snd_nxt;
2401 }
2402 
2403 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2404  */
2405 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2406                           struct sk_buff_head *xmitq)
2407 {
2408         u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2409         int rc = 0;
2410 
2411         if (!tipc_link_is_up(l))
2412                 return rc;
2413 
2414         if (!msg_peer_node_is_up(hdr))
2415                 return rc;
2416 
2417         /* Open when peer acknowledges our bcast init msg (pkt #1) */
2418         if (msg_ack(hdr))
2419                 l->bc_peer_is_up = true;
2420 
2421         if (!l->bc_peer_is_up)
2422                 return rc;
2423 
2424         /* Ignore if peers_snd_nxt goes beyond receive window */
2425         if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2426                 return rc;
2427 
2428         l->snd_nxt = peers_snd_nxt;
2429         if (link_bc_rcv_gap(l))
2430                 rc |= TIPC_LINK_SND_STATE;
2431 
2432         /* Return now if sender supports nack via STATE messages */
2433         if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2434                 return rc;
2435 
2436         /* Otherwise, be backwards compatible */
2437 
2438         if (!more(peers_snd_nxt, l->rcv_nxt)) {
2439                 l->nack_state = BC_NACK_SND_CONDITIONAL;
2440                 return 0;
2441         }
2442 
2443         /* Don't NACK if one was recently sent or peeked */
2444         if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2445                 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2446                 return 0;
2447         }
2448 
2449         /* Conditionally delay NACK sending until next synch rcv */
2450         if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2451                 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2452                 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2453                         return 0;
2454         }
2455 
2456         /* Send NACK now but suppress next one */
2457         tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2458         l->nack_state = BC_NACK_SND_SUPPRESS;
2459         return 0;
2460 }
2461 
2462 int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
2463                          struct tipc_gap_ack_blks *ga,
2464                          struct sk_buff_head *xmitq,
2465                          struct sk_buff_head *retrq)
2466 {
2467         struct tipc_link *l = r->bc_sndlink;
2468         bool unused = false;
2469         int rc = 0;
2470 
2471         if (!tipc_link_is_up(r) || !r->bc_peer_is_up)
2472                 return 0;
2473 
2474         if (gap) {
2475                 l->stats.recv_nacks++;
2476                 r->stats.recv_nacks++;
2477         }
2478 
2479         if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
2480                 return 0;
2481 
2482         trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
2483         tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
2484 
2485         tipc_link_advance_backlog(l, xmitq);
2486         if (unlikely(!skb_queue_empty(&l->wakeupq)))
2487                 link_prepare_wakeup(l);
2488 
2489         return rc;
2490 }
2491 
2492 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
2493  * This function is here for backwards compatibility, since
2494  * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
2495  */
2496 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2497                           struct sk_buff_head *xmitq)
2498 {
2499         struct tipc_msg *hdr = buf_msg(skb);
2500         u32 dnode = msg_destnode(hdr);
2501         int mtyp = msg_type(hdr);
2502         u16 acked = msg_bcast_ack(hdr);
2503         u16 from = acked + 1;
2504         u16 to = msg_bcgap_to(hdr);
2505         u16 peers_snd_nxt = to + 1;
2506         int rc = 0;
2507 
2508         kfree_skb(skb);
2509 
2510         if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2511                 return 0;
2512 
2513         if (mtyp != STATE_MSG)
2514                 return 0;
2515 
2516         if (dnode == tipc_own_addr(l->net)) {
2517                 rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
2518                                           xmitq);
2519                 l->stats.recv_nacks++;
2520                 return rc;
2521         }
2522 
2523         /* Msg for other node => suppress own NACK at next sync if applicable */
2524         if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2525                 l->nack_state = BC_NACK_SND_SUPPRESS;
2526 
2527         return 0;
2528 }
2529 
2530 void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
2531 {
2532         int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2533 
2534         l->min_win = min_win;
2535         l->ssthresh = max_win;
2536         l->max_win = max_win;
2537         l->window = min_win;
2538         l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
2539         l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
2540         l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
2541         l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
2542         l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2543 }
2544 
2545 /**
2546  * tipc_link_reset_stats - reset link statistics
2547  * @l: pointer to link
2548  */
2549 void tipc_link_reset_stats(struct tipc_link *l)
2550 {
2551         memset(&l->stats, 0, sizeof(l->stats));
2552 }
2553 
2554 static void link_print(struct tipc_link *l, const char *str)
2555 {
2556         struct sk_buff *hskb = skb_peek(&l->transmq);
2557         u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2558         u16 tail = l->snd_nxt - 1;
2559 
2560         pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2561         pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2562                 skb_queue_len(&l->transmq), head, tail,
2563                 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2564 }
2565 
2566 /* Parse and validate nested (link) properties valid for media, bearer and link
2567  */
2568 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2569 {
2570         int err;
2571 
2572         err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2573                                           tipc_nl_prop_policy, NULL);
2574         if (err)
2575                 return err;
2576 
2577         if (props[TIPC_NLA_PROP_PRIO]) {
2578                 u32 prio;
2579 
2580                 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2581                 if (prio > TIPC_MAX_LINK_PRI)
2582                         return -EINVAL;
2583         }
2584 
2585         if (props[TIPC_NLA_PROP_TOL]) {
2586                 u32 tol;
2587 
2588                 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2589                 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2590                         return -EINVAL;
2591         }
2592 
2593         if (props[TIPC_NLA_PROP_WIN]) {
2594                 u32 max_win;
2595 
2596                 max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2597                 if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
2598                         return -EINVAL;
2599         }
2600 
2601         return 0;
2602 }
2603 
2604 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2605 {
2606         int i;
2607         struct nlattr *stats;
2608 
2609         struct nla_map {
2610                 u32 key;
2611                 u32 val;
2612         };
2613 
2614         struct nla_map map[] = {
2615                 {TIPC_NLA_STATS_RX_INFO, 0},
2616                 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2617                 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2618                 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2619                 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2620                 {TIPC_NLA_STATS_TX_INFO, 0},
2621                 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2622                 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2623                 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2624                 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2625                 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2626                         s->msg_length_counts : 1},
2627                 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2628                 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2629                 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2630                 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2631                 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2632                 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2633                 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2634                 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2635                 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2636                 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
2637                 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2638                 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2639                 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2640                 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
2641                 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2642                 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2643                 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2644                 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2645                 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2646                 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2647                 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2648                 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2649                         (s->accu_queue_sz / s->queue_sz_counts) : 0}
2650         };
2651 
2652         stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2653         if (!stats)
2654                 return -EMSGSIZE;
2655 
2656         for (i = 0; i <  ARRAY_SIZE(map); i++)
2657                 if (nla_put_u32(skb, map[i].key, map[i].val))
2658                         goto msg_full;
2659 
2660         nla_nest_end(skb, stats);
2661 
2662         return 0;
2663 msg_full:
2664         nla_nest_cancel(skb, stats);
2665 
2666         return -EMSGSIZE;
2667 }
2668 
2669 /* Caller should hold appropriate locks to protect the link */
2670 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2671                        struct tipc_link *link, int nlflags)
2672 {
2673         u32 self = tipc_own_addr(net);
2674         struct nlattr *attrs;
2675         struct nlattr *prop;
2676         void *hdr;
2677         int err;
2678 
2679         hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2680                           nlflags, TIPC_NL_LINK_GET);
2681         if (!hdr)
2682                 return -EMSGSIZE;
2683 
2684         attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2685         if (!attrs)
2686                 goto msg_full;
2687 
2688         if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2689                 goto attr_msg_full;
2690         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2691                 goto attr_msg_full;
2692         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2693                 goto attr_msg_full;
2694         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2695                 goto attr_msg_full;
2696         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2697                 goto attr_msg_full;
2698 
2699         if (tipc_link_is_up(link))
2700                 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2701                         goto attr_msg_full;
2702         if (link->active)
2703                 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2704                         goto attr_msg_full;
2705 
2706         prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2707         if (!prop)
2708                 goto attr_msg_full;
2709         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2710                 goto prop_msg_full;
2711         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2712                 goto prop_msg_full;
2713         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2714                         link->window))
2715                 goto prop_msg_full;
2716         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2717                 goto prop_msg_full;
2718         nla_nest_end(msg->skb, prop);
2719 
2720         err = __tipc_nl_add_stats(msg->skb, &link->stats);
2721         if (err)
2722                 goto attr_msg_full;
2723 
2724         nla_nest_end(msg->skb, attrs);
2725         genlmsg_end(msg->skb, hdr);
2726 
2727         return 0;
2728 
2729 prop_msg_full:
2730         nla_nest_cancel(msg->skb, prop);
2731 attr_msg_full:
2732         nla_nest_cancel(msg->skb, attrs);
2733 msg_full:
2734         genlmsg_cancel(msg->skb, hdr);
2735 
2736         return -EMSGSIZE;
2737 }
2738 
2739 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2740                                       struct tipc_stats *stats)
2741 {
2742         int i;
2743         struct nlattr *nest;
2744 
2745         struct nla_map {
2746                 __u32 key;
2747                 __u32 val;
2748         };
2749 
2750         struct nla_map map[] = {
2751                 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2752                 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2753                 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2754                 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2755                 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2756                 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2757                 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2758                 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2759                 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2760                 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2761                 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2762                 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2763                 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2764                 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2765                 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2766                 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2767                 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2768                 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2769                 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2770                         (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2771         };
2772 
2773         nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2774         if (!nest)
2775                 return -EMSGSIZE;
2776 
2777         for (i = 0; i <  ARRAY_SIZE(map); i++)
2778                 if (nla_put_u32(skb, map[i].key, map[i].val))
2779                         goto msg_full;
2780 
2781         nla_nest_end(skb, nest);
2782 
2783         return 0;
2784 msg_full:
2785         nla_nest_cancel(skb, nest);
2786 
2787         return -EMSGSIZE;
2788 }
2789 
2790 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
2791                         struct tipc_link *bcl)
2792 {
2793         int err;
2794         void *hdr;
2795         struct nlattr *attrs;
2796         struct nlattr *prop;
2797         u32 bc_mode = tipc_bcast_get_mode(net);
2798         u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2799 
2800         if (!bcl)
2801                 return 0;
2802 
2803         tipc_bcast_lock(net);
2804 
2805         hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2806                           NLM_F_MULTI, TIPC_NL_LINK_GET);
2807         if (!hdr) {
2808                 tipc_bcast_unlock(net);
2809                 return -EMSGSIZE;
2810         }
2811 
2812         attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2813         if (!attrs)
2814                 goto msg_full;
2815 
2816         /* The broadcast link is always up */
2817         if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2818                 goto attr_msg_full;
2819 
2820         if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2821                 goto attr_msg_full;
2822         if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2823                 goto attr_msg_full;
2824         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2825                 goto attr_msg_full;
2826         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2827                 goto attr_msg_full;
2828 
2829         prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2830         if (!prop)
2831                 goto attr_msg_full;
2832         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
2833                 goto prop_msg_full;
2834         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2835                 goto prop_msg_full;
2836         if (bc_mode & BCLINK_MODE_SEL)
2837                 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2838                                 bc_ratio))
2839                         goto prop_msg_full;
2840         nla_nest_end(msg->skb, prop);
2841 
2842         err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2843         if (err)
2844                 goto attr_msg_full;
2845 
2846         tipc_bcast_unlock(net);
2847         nla_nest_end(msg->skb, attrs);
2848         genlmsg_end(msg->skb, hdr);
2849 
2850         return 0;
2851 
2852 prop_msg_full:
2853         nla_nest_cancel(msg->skb, prop);
2854 attr_msg_full:
2855         nla_nest_cancel(msg->skb, attrs);
2856 msg_full:
2857         tipc_bcast_unlock(net);
2858         genlmsg_cancel(msg->skb, hdr);
2859 
2860         return -EMSGSIZE;
2861 }
2862 
2863 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2864                              struct sk_buff_head *xmitq)
2865 {
2866         l->tolerance = tol;
2867         if (l->bc_rcvlink)
2868                 l->bc_rcvlink->tolerance = tol;
2869         if (tipc_link_is_up(l))
2870                 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2871 }
2872 
2873 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2874                         struct sk_buff_head *xmitq)
2875 {
2876         l->priority = prio;
2877         tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2878 }
2879 
2880 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2881 {
2882         l->abort_limit = limit;
2883 }
2884 
2885 /**
2886  * tipc_link_dump - dump TIPC link data
2887  * @l: tipc link to be dumped
2888  * @dqueues: bitmask to decide if any link queue to be dumped?
2889  *           - TIPC_DUMP_NONE: don't dump link queues
2890  *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2891  *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2892  *           - TIPC_DUMP_DEFERDQ: dump link deferd queue
2893  *           - TIPC_DUMP_INPUTQ: dump link input queue
2894  *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2895  *           - TIPC_DUMP_ALL: dump all the link queues above
2896  * @buf: returned buffer of dump data in format
2897  */
2898 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2899 {
2900         int i = 0;
2901         size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2902         struct sk_buff_head *list;
2903         struct sk_buff *hskb, *tskb;
2904         u32 len;
2905 
2906         if (!l) {
2907                 i += scnprintf(buf, sz, "link data: (null)\n");
2908                 return i;
2909         }
2910 
2911         i += scnprintf(buf, sz, "link data: %x", l->addr);
2912         i += scnprintf(buf + i, sz - i, " %x", l->state);
2913         i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2914         i += scnprintf(buf + i, sz - i, " %u", l->session);
2915         i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2916         i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2917         i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2918         i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2919         i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2920         i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2921         i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2922         i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2923         i += scnprintf(buf + i, sz - i, " %u", 0);
2924         i += scnprintf(buf + i, sz - i, " %u", 0);
2925         i += scnprintf(buf + i, sz - i, " %u", l->acked);
2926 
2927         list = &l->transmq;
2928         len = skb_queue_len(list);
2929         hskb = skb_peek(list);
2930         tskb = skb_peek_tail(list);
2931         i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2932                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2933                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2934 
2935         list = &l->deferdq;
2936         len = skb_queue_len(list);
2937         hskb = skb_peek(list);
2938         tskb = skb_peek_tail(list);
2939         i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2940                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2941                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2942 
2943         list = &l->backlogq;
2944         len = skb_queue_len(list);
2945         hskb = skb_peek(list);
2946         tskb = skb_peek_tail(list);
2947         i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2948                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2949                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2950 
2951         list = l->inputq;
2952         len = skb_queue_len(list);
2953         hskb = skb_peek(list);
2954         tskb = skb_peek_tail(list);
2955         i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2956                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2957                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2958 
2959         if (dqueues & TIPC_DUMP_TRANSMQ) {
2960                 i += scnprintf(buf + i, sz - i, "transmq: ");
2961                 i += tipc_list_dump(&l->transmq, false, buf + i);
2962         }
2963         if (dqueues & TIPC_DUMP_BACKLOGQ) {
2964                 i += scnprintf(buf + i, sz - i,
2965                                "backlogq: <%u %u %u %u %u>, ",
2966                                l->backlog[TIPC_LOW_IMPORTANCE].len,
2967                                l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2968                                l->backlog[TIPC_HIGH_IMPORTANCE].len,
2969                                l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2970                                l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2971                 i += tipc_list_dump(&l->backlogq, false, buf + i);
2972         }
2973         if (dqueues & TIPC_DUMP_DEFERDQ) {
2974                 i += scnprintf(buf + i, sz - i, "deferdq: ");
2975                 i += tipc_list_dump(&l->deferdq, false, buf + i);
2976         }
2977         if (dqueues & TIPC_DUMP_INPUTQ) {
2978                 i += scnprintf(buf + i, sz - i, "inputq: ");
2979                 i += tipc_list_dump(l->inputq, false, buf + i);
2980         }
2981         if (dqueues & TIPC_DUMP_WAKEUP) {
2982                 i += scnprintf(buf + i, sz - i, "wakeup: ");
2983                 i += tipc_list_dump(&l->wakeupq, false, buf + i);
2984         }
2985 
2986         return i;
2987 }
2988 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php