#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
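"""Driver selftests for RSS configuration via ethtool: hash key and
indirection table updates, queue count changes, and additional RSS
contexts with ntuple steering."""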

import datetime
import random
from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt
from lib.py import NetDrvEpEnv
from lib.py import EthtoolFamily, NetdevFamily
from lib.py import KsftSkipEx, KsftFailEx
from lib.py import rand_port
from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure


def _rss_key_str(key):
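    # Format key bytes as the colon-separated hex string ethtool expects,
    # e.g. [222, 173, 190, 239] -> "de:ad:be:ef".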
    return ":".join(["{:02x}".format(x) for x in key])


def _rss_key_rand(length):
    return [random.randint(0, 255) for _ in range(length)]


def _rss_key_check(cfg, data=None, context=0):
    if data is None:
        data = get_rss(cfg, context=context)
    if 'rss-hash-key' not in data:
        return
    non_zero = [x for x in data['rss-hash-key'] if x != 0]
    ksft_eq(bool(non_zero), True, comment=f"RSS key is all zero {data['rss-hash-key']}")


def get_rss(cfg, context=0):
    return ethtool(f"-x {cfg.ifname} context {context}", json=True)[0]


def get_drop_err_sum(cfg):
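    # Sum the rx error/drop counters; also return the tx carrier_changes count
    # so callers can detect link flaps.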
    stats = ip("-s -s link show dev " + cfg.ifname, json=True)[0]
    cnt = 0
    for key in ['errors', 'dropped', 'over_errors', 'fifo_errors',
                'length_errors', 'crc_errors', 'missed_errors',
                'frame_errors']:
        cnt += stats["stats64"]["rx"][key]
    return cnt, stats["stats64"]["tx"]["carrier_changes"]


def ethtool_create(cfg, act, opts):
    output = ethtool(f"{act} {cfg.ifname} {opts}").stdout
    # Output will be something like: "New RSS context is 1" or
    # "Added rule with ID 7"; we want the integer at the end.
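    # e.g. "New RSS context is 1" -> 1, "Added rule with ID 7" -> 7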
    return int(output.split()[-1])


def require_ntuple(cfg):
    features = ethtool(f"-k {cfg.ifname}", json=True)[0]
    if not features["ntuple-filters"]["active"]:
        # ntuple is more of a capability than a config knob; don't bother
        # trying to enable it (until some driver actually needs it).
        raise KsftSkipEx("Ntuple filters not enabled on the device: " + str(features["ntuple-filters"]))


# Get Rx packet counts for all queues, as a simple list of integers.
# If @prev is specified, the prev counts are subtracted.
def _get_rx_cnts(cfg, prev=None):
    cfg.wait_hw_stats_settle()
    data = cfg.netdevnl.qstats_get({"ifindex": cfg.ifindex, "scope": ["queue"]}, dump=True)
    data = [x for x in data if x['queue-type'] == "rx"]
    max_q = max([x["queue-id"] for x in data])
    queue_stats = [0] * (max_q + 1)
    for q in data:
        queue_stats[q["queue-id"]] = q["rx-packets"]
        if prev and q["queue-id"] < len(prev):
            queue_stats[q["queue-id"]] -= prev[q["queue-id"]]
    return queue_stats


def _send_traffic_check(cfg, port, name, params):
    # params is a dict with 3 possible keys:
    #  - "target": required, which queues we expect to get iperf traffic
    #  - "empty": optional, which queues should see no traffic at all
    #  - "noise": optional, which queues we expect to see low traffic;
    #             used for queues of the main context, since some background
    #             OS activity may use those queues while we're testing
    # The value for each is a list, or some other iterable containing queue IDs.
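    #
    # Example (illustrative): {'target': (2, 3), 'noise': (0, 1), 'empty': (4, 5)}
    # expects the bulk of the traffic on queues 2 and 3, tolerates a little on
    # queues 0 and 1, and requires none on queues 4 and 5.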

    cnts = _get_rx_cnts(cfg)
    GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
    cnts = _get_rx_cnts(cfg, prev=cnts)

    directed = sum(cnts[i] for i in params['target'])

    ksft_ge(directed, 20000, f"traffic on {name}: " + str(cnts))
    if params.get('noise'):
        ksft_lt(sum(cnts[i] for i in params['noise']), directed / 2,
                f"traffic on other queues ({name}): " + str(cnts))
    if params.get('empty'):
        ksft_eq(sum(cnts[i] for i in params['empty']), 0,
                f"traffic on inactive queues ({name}): " + str(cnts))


def test_rss_key_indir(cfg):
    """Test basics like updating the main RSS key and indirection table."""

    qcnt = len(_get_rx_cnts(cfg))
    if qcnt < 3:
        raise KsftSkipEx("Device has fewer than 3 queues (or doesn't support queue stats)")

    data = get_rss(cfg)
    want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table']
    for k in want_keys:
        if k not in data:
            raise KsftFailEx("ethtool results missing key: " + k)
        if not data[k]:
            raise KsftFailEx(f"ethtool results empty for '{k}': {data[k]}")

    _rss_key_check(cfg, data=data)
    key_len = len(data['rss-hash-key'])

    # Set the key
    key = _rss_key_rand(key_len)
    ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))

    data = get_rss(cfg)
    ksft_eq(key, data['rss-hash-key'])

    # Set the indirection table and the key together
    key = _rss_key_rand(key_len)
    ethtool(f"-X {cfg.ifname} equal 3 hkey " + _rss_key_str(key))
    reset_indir = defer(ethtool, f"-X {cfg.ifname} default")

    data = get_rss(cfg)
    _rss_key_check(cfg, data=data)
    ksft_eq(0, min(data['rss-indirection-table']))
    ksft_eq(2, max(data['rss-indirection-table']))

    # Reset indirection table and set the key
    key = _rss_key_rand(key_len)
    ethtool(f"-X {cfg.ifname} default hkey " + _rss_key_str(key))
    data = get_rss(cfg)
    _rss_key_check(cfg, data=data)
    ksft_eq(0, min(data['rss-indirection-table']))
    ksft_eq(qcnt - 1, max(data['rss-indirection-table']))

    # Set the indirection table
    ethtool(f"-X {cfg.ifname} equal 2")
    data = get_rss(cfg)
    ksft_eq(0, min(data['rss-indirection-table']))
    ksft_eq(1, max(data['rss-indirection-table']))

    # Check we only get traffic on the first 2 queues
    cnts = _get_rx_cnts(cfg)
    GenerateTraffic(cfg).wait_pkts_and_stop(20000)
    cnts = _get_rx_cnts(cfg, prev=cnts)
    # 2 queues, 20k packets, must be at least 5k per queue
    ksft_ge(cnts[0], 5000, "traffic on main context (1/2): " + str(cnts))
    ksft_ge(cnts[1], 5000, "traffic on main context (2/2): " + str(cnts))
    # The other queues should be unused
    ksft_eq(sum(cnts[2:]), 0, "traffic on unused queues: " + str(cnts))

    # Restore, and check traffic gets spread again
    reset_indir.exec()

    cnts = _get_rx_cnts(cfg)
    GenerateTraffic(cfg).wait_pkts_and_stop(20000)
    cnts = _get_rx_cnts(cfg, prev=cnts)
    # First two queues get less traffic than all the rest
    ksft_lt(sum(cnts[:2]), sum(cnts[2:]), "traffic distributed: " + str(cnts))


def test_rss_queue_reconfigure(cfg, main_ctx=True):
    """Make sure queue changes can't override the requested RSS config.

    By default the main RSS table should change to include all queues.
    When the user sets a specific RSS config the driver should preserve it,
    even when the queue count changes. The driver should refuse to deactivate
    queues used in the user-set RSS config.
    """

    if not main_ctx:
        require_ntuple(cfg)

    # Start with 4 queues, an arbitrary known number.
    try:
        qcnt = len(_get_rx_cnts(cfg))
        ethtool(f"-L {cfg.ifname} combined 4")
        defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
    except:
        raise KsftSkipEx("Not enough queues for the test or qstat not supported")

    if main_ctx:
        ctx_id = 0
        ctx_ref = ""
    else:
        ctx_id = ethtool_create(cfg, "-X", "context new")
        ctx_ref = f"context {ctx_id}"
        defer(ethtool, f"-X {cfg.ifname} {ctx_ref} delete")

    # Indirection table should be distributing to all queues.
    data = get_rss(cfg, context=ctx_id)
    ksft_eq(0, min(data['rss-indirection-table']))
    ksft_eq(3, max(data['rss-indirection-table']))

    # Increase the queue count; the indirection table should be distributing
    # to all queues. It's unclear whether tables of additional contexts should
    # be reset, too.
    if main_ctx:
        ethtool(f"-L {cfg.ifname} combined 5")
        data = get_rss(cfg)
        ksft_eq(0, min(data['rss-indirection-table']))
        ksft_eq(4, max(data['rss-indirection-table']))
        ethtool(f"-L {cfg.ifname} combined 4")

    # Configure the table explicitly
    port = rand_port()
    ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 0 1")
    if main_ctx:
        other_key = 'empty'
        defer(ethtool, f"-X {cfg.ifname} default")
    else:
        other_key = 'noise'
        flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
        ntuple = ethtool_create(cfg, "-N", flow)
        defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")

    _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3),
                                              other_key: (1, 2) })

    # We should be able to increase the queue count, but the table should be
    # left untouched
    ethtool(f"-L {cfg.ifname} combined 5")
    data = get_rss(cfg, context=ctx_id)
    ksft_eq({0, 3}, set(data['rss-indirection-table']))

    _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3),
                                              other_key: (1, 2, 4) })

    # Setting the queue count to 3 should fail; queue 3 is used
    try:
        ethtool(f"-L {cfg.ifname} combined 3")
    except CmdExitFailure:
        pass
    else:
        raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})")


def test_rss_resize(cfg):
    """Test resizing of the RSS table.

    Some devices dynamically increase and decrease the size of the RSS
    indirection table based on the number of enabled queues.
    When that happens the driver must maintain the balance of entries
    (preferably duplicating the smaller table).
    """
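    # Illustrative example: with "weight 1 7" an 8-entry table [0 1 1 1 1 1 1 1]
    # that doubles in size should keep the 1:7 ratio, e.g. by duplicating every
    # entry; the final check below verifies that ratio.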

    channels = cfg.ethnl.channels_get({'header': {'dev-index': cfg.ifindex}})
    ch_max = channels['combined-max']
    qcnt = channels['combined-count']

    if ch_max < 2:
        raise KsftSkipEx(f"Not enough queues for the test: {ch_max}")

    ethtool(f"-L {cfg.ifname} combined 2")
    defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")

    ethtool(f"-X {cfg.ifname} weight 1 7")
    defer(ethtool, f"-X {cfg.ifname} default")

    ethtool(f"-L {cfg.ifname} combined {ch_max}")
    data = get_rss(cfg)
    ksft_eq(0, min(data['rss-indirection-table']))
    ksft_eq(1, max(data['rss-indirection-table']))

    ksft_eq(7,
            data['rss-indirection-table'].count(1) /
            data['rss-indirection-table'].count(0),
            f"Table imbalance after resize: {data['rss-indirection-table']}")


def test_hitless_key_update(cfg):
    """Test that flows may be rehashed without impacting traffic.

    Some workloads may want to rehash the flows in response to an imbalance.
    The most effective way to do that is to change the RSS key. Check that
    changing the key does not cause link flaps or traffic disruption.

    Disrupting traffic for a key update is not a bug, but it makes the key
    update unusable for rehashing under load.
    """
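    # "Hitless" is checked below as: the key update completes in under 0.2 s,
    # and the rx error/drop counters and the carrier change count stay flat.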
    data = get_rss(cfg)
    key_len = len(data['rss-hash-key'])

    key = _rss_key_rand(key_len)

    tgen = GenerateTraffic(cfg)
    try:
        errors0, carrier0 = get_drop_err_sum(cfg)
        t0 = datetime.datetime.now()
        ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
        t1 = datetime.datetime.now()
        errors1, carrier1 = get_drop_err_sum(cfg)
    finally:
        tgen.wait_pkts_and_stop(5000)

    ksft_lt((t1 - t0).total_seconds(), 0.2)
    ksft_eq(errors1 - errors0, 0)
    ksft_eq(carrier1 - carrier0, 0)


def test_rss_context_dump(cfg):
    """
    Test dumping RSS contexts. This test mostly exercises the kernel APIs.
    """

    # Get a random key of the right size
    data = get_rss(cfg)
    if 'rss-hash-key' in data:
        key_data = _rss_key_rand(len(data['rss-hash-key']))
        key = _rss_key_str(key_data)
    else:
        key_data = []
        key = "ba:ad"

    ids = []
    try:
        ids.append(ethtool_create(cfg, "-X", "context new"))
        defer(ethtool, f"-X {cfg.ifname} context {ids[-1]} delete")

        ids.append(ethtool_create(cfg, "-X", "context new weight 1 1"))
        defer(ethtool, f"-X {cfg.ifname} context {ids[-1]} delete")

        ids.append(ethtool_create(cfg, "-X", f"context new hkey {key}"))
        defer(ethtool, f"-X {cfg.ifname} context {ids[-1]} delete")
    except CmdExitFailure:
        if not ids:
            raise KsftSkipEx("Unable to add any contexts")
        ksft_pr(f"Added only {len(ids)} out of 3 contexts")

    expect_tuples = set([(cfg.ifname, -1)] + [(cfg.ifname, ctx_id) for ctx_id in ids])

    # Dump all
    ctxs = cfg.ethnl.rss_get({}, dump=True)
    tuples = [(c['header']['dev-name'], c.get('context', -1)) for c in ctxs]
    ksft_eq(len(tuples), len(set(tuples)), "duplicates in context dump")
    ctx_tuples = set([ctx for ctx in tuples if ctx[0] == cfg.ifname])
    ksft_eq(expect_tuples, ctx_tuples)

    # Sanity-check the results
    for data in ctxs:
        ksft_ne(set(data['indir']), {0}, "indir table is all zero")
        ksft_ne(set(data.get('hkey', [1])), {0}, "key is all zero")

        # More specific checks
        if len(ids) > 1 and data.get('context') == ids[1]:
            ksft_eq(set(data['indir']), {0, 1},
                    "ctx1 - indir table mismatch")
        if len(ids) > 2 and data.get('context') == ids[2]:
            ksft_eq(data['hkey'], bytes(key_data), "ctx2 - key mismatch")

    # Ifindex filter
    ctxs = cfg.ethnl.rss_get({'header': {'dev-name': cfg.ifname}}, dump=True)
    tuples = [(c['header']['dev-name'], c.get('context', -1)) for c in ctxs]
    ctx_tuples = set(tuples)
    ksft_eq(len(tuples), len(ctx_tuples), "duplicates in context dump")
    ksft_eq(expect_tuples, ctx_tuples)

    # Skip ctx 0
    expect_tuples.remove((cfg.ifname, -1))

    ctxs = cfg.ethnl.rss_get({'start-context': 1}, dump=True)
    tuples = [(c['header']['dev-name'], c.get('context', -1)) for c in ctxs]
    ksft_eq(len(tuples), len(set(tuples)), "duplicates in context dump")
    ctx_tuples = set([ctx for ctx in tuples if ctx[0] == cfg.ifname])
    ksft_eq(expect_tuples, ctx_tuples)

    # And finally both with ifindex and skip main
    ctxs = cfg.ethnl.rss_get({'header': {'dev-name': cfg.ifname}, 'start-context': 1}, dump=True)
    ctx_tuples = set([(c['header']['dev-name'], c.get('context', -1)) for c in ctxs])
    ksft_eq(expect_tuples, ctx_tuples)


def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
    """
    Test separating traffic into RSS contexts.
    Two queues are allocated for each context:
     ctx0  ctx1  ctx2  ctx3
    [0 1] [2 3] [4 5] [6 7] ...
    """

    require_ntuple(cfg)

    requested_ctx_cnt = ctx_cnt

    # Try to allocate more queues when necessary
    qcnt = len(_get_rx_cnts(cfg))
    if qcnt < 2 + 2 * ctx_cnt:
        try:
            ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}")
            ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}")
            defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
        except:
            raise KsftSkipEx("Not enough queues for the test")

    ports = []

    # Use queues 0 and 1 for normal traffic
    ethtool(f"-X {cfg.ifname} equal 2")
    defer(ethtool, f"-X {cfg.ifname} default")

    for i in range(ctx_cnt):
        want_cfg = f"start {2 + i * 2} equal 2"
        create_cfg = want_cfg if create_with_cfg else ""

        try:
            ctx_id = ethtool_create(cfg, "-X", f"context new {create_cfg}")
            defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
        except CmdExitFailure:
            # try to carry on and skip at the end
            if i == 0:
                raise
            ksft_pr(f"Failed to create context {i + 1}, trying to test what we got")
            ctx_cnt = i
            break

        _rss_key_check(cfg, context=ctx_id)

        if not create_with_cfg:
            ethtool(f"-X {cfg.ifname} context {ctx_id} {want_cfg}")
            _rss_key_check(cfg, context=ctx_id)

        # Sanity check the context we just created
        data = get_rss(cfg, ctx_id)
        ksft_eq(min(data['rss-indirection-table']), 2 + i * 2, "Unexpected context cfg: " + str(data))
        ksft_eq(max(data['rss-indirection-table']), 2 + i * 2 + 1, "Unexpected context cfg: " + str(data))

        ports.append(rand_port())
        flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
        ntuple = ethtool_create(cfg, "-N", flow)
        defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")

    for i in range(ctx_cnt):
        _send_traffic_check(cfg, ports[i], f"context {i}",
                            { 'target': (2+i*2, 3+i*2),
                              'noise': (0, 1),
                              'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt)) })

    if requested_ctx_cnt != ctx_cnt:
        raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}")


def test_rss_context4(cfg):
    test_rss_context(cfg, 4)


def test_rss_context32(cfg):
    test_rss_context(cfg, 32)


def test_rss_context4_create_with_cfg(cfg):
    test_rss_context(cfg, 4, create_with_cfg=True)


def test_rss_context_queue_reconfigure(cfg):
    test_rss_queue_reconfigure(cfg, main_ctx=False)


def test_rss_context_out_of_order(cfg, ctx_cnt=4):
    """
    Test separating traffic into RSS contexts.
    Contexts are removed in semi-random order, and steering is re-tested
    to make sure removal doesn't break steering to the surviving contexts.
    The test requires 3 contexts to work.
    """

    require_ntuple(cfg)

    requested_ctx_cnt = ctx_cnt

    # Try to allocate more queues when necessary
    qcnt = len(_get_rx_cnts(cfg))
    if qcnt < 2 + 2 * ctx_cnt:
        try:
            ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}")
            ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}")
            defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
        except:
            raise KsftSkipEx("Not enough queues for the test")

    ntuple = []
    ctx = []
    ports = []

    def remove_ctx(idx):
        ntuple[idx].exec()
        ntuple[idx] = None
        ctx[idx].exec()
        ctx[idx] = None

    def check_traffic():
        for i in range(ctx_cnt):
            if ctx[i]:
                expected = {
                    'target': (2+i*2, 3+i*2),
                    'noise': (0, 1),
                    'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt))
                }
            else:
                expected = {
                    'target': (0, 1),
                    'empty':  range(2, 2+2*ctx_cnt)
                }

            _send_traffic_check(cfg, ports[i], f"context {i}", expected)

    # Use queues 0 and 1 for normal traffic
    ethtool(f"-X {cfg.ifname} equal 2")
    defer(ethtool, f"-X {cfg.ifname} default")

    for i in range(ctx_cnt):
        ctx_id = ethtool_create(cfg, "-X", f"context new start {2 + i * 2} equal 2")
        ctx.append(defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete"))

        ports.append(rand_port())
        flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
        ntuple_id = ethtool_create(cfg, "-N", flow)
        ntuple.append(defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}"))

    check_traffic()

    # Remove middle context
    remove_ctx(ctx_cnt // 2)
    check_traffic()

    # Remove first context
    remove_ctx(0)
    check_traffic()

    # Remove last context
    remove_ctx(-1)
    check_traffic()

    if requested_ctx_cnt != ctx_cnt:
        raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}")


def test_rss_context_overlap(cfg, other_ctx=0):
    """
    Test contexts overlapping with each other.
    Use 4 queues for the main context, but only queues 2 and 3 for context 1.
    """

    require_ntuple(cfg)

    queue_cnt = len(_get_rx_cnts(cfg))
    if queue_cnt < 4:
        try:
            ksft_pr(f"Increasing queue count {queue_cnt} -> 4")
            ethtool(f"-L {cfg.ifname} combined 4")
            defer(ethtool, f"-L {cfg.ifname} combined {queue_cnt}")
        except:
            raise KsftSkipEx("Not enough queues for the test")

    if other_ctx == 0:
        ethtool(f"-X {cfg.ifname} equal 4")
        defer(ethtool, f"-X {cfg.ifname} default")
    else:
        other_ctx = ethtool_create(cfg, "-X", "context new")
        ethtool(f"-X {cfg.ifname} context {other_ctx} equal 4")
        defer(ethtool, f"-X {cfg.ifname} context {other_ctx} delete")

    ctx_id = ethtool_create(cfg, "-X", "context new")
    ethtool(f"-X {cfg.ifname} context {ctx_id} start 2 equal 2")
    defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")

    port = rand_port()
    if other_ctx:
        flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {other_ctx}"
        ntuple_id = ethtool_create(cfg, "-N", flow)
        ntuple = defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")

    # Test the main context
    cnts = _get_rx_cnts(cfg)
    GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
    cnts = _get_rx_cnts(cfg, prev=cnts)

    ksft_ge(sum(cnts[ :4]), 20000, "traffic on main context: " + str(cnts))
    ksft_ge(sum(cnts[ :2]),  7000, "traffic on main context (1/2): " + str(cnts))
    ksft_ge(sum(cnts[2:4]),  7000, "traffic on main context (2/2): " + str(cnts))
    if other_ctx == 0:
        ksft_eq(sum(cnts[4: ]),     0, "traffic on other queues: " + str(cnts))

    # Now create a rule for context 1 and make sure traffic goes to a subset
    if other_ctx:
        ntuple.exec()
    flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
    ntuple_id = ethtool_create(cfg, "-N", flow)
    defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")

    cnts = _get_rx_cnts(cfg)
    GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
    cnts = _get_rx_cnts(cfg, prev=cnts)

    directed = sum(cnts[2:4])
    ksft_lt(sum(cnts[ :2]), directed / 2, "traffic on main context: " + str(cnts))
    ksft_ge(directed, 20000, "traffic on extra context: " + str(cnts))
    if other_ctx == 0:
        ksft_eq(sum(cnts[4: ]),     0, "traffic on other queues: " + str(cnts))


def test_rss_context_overlap2(cfg):
    test_rss_context_overlap(cfg, True)


def main() -> None:
    with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
        cfg.ethnl = EthtoolFamily()
        cfg.netdevnl = NetdevFamily()

        ksft_run([test_rss_key_indir, test_rss_queue_reconfigure,
                  test_rss_resize, test_hitless_key_update,
                  test_rss_context, test_rss_context4, test_rss_context32,
                  test_rss_context_dump, test_rss_context_queue_reconfigure,
                  test_rss_context_overlap, test_rss_context_overlap2,
                  test_rss_context_out_of_order, test_rss_context4_create_with_cfg],
                 args=(cfg, ))
    ksft_exit()


if __name__ == "__main__":
    main()
