// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

TC_INDIRECT_SCOPE int mall_classify(struct sk_buff *skb,
				    const struct tcf_proto *tp,
				    struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);

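		/* Building the offload action failed.  If the user allowed
		 * a software fallback (no skip_sw), keep the filter in
		 * software and report success; otherwise propagate the error.
		 */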
		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]	= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]	= { .type = NLA_U32 },
	[TCA_MATCHALL_FLAGS]	= { .type = NLA_U32 },
};

static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	bool bound_to_filter = false;
	struct cls_mall_head *new;
	u32 userflags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(userflags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = userflags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
				   &new->exts, flags, new->flags, extack);
	if (err < 0)
		goto err_set_parms;

	if (tb[TCA_MATCHALL_CLASSID]) {
		new->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &new->res, base);
		bound_to_filter = true;
	}

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &new->res);
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

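/* matchall holds a single filter per tcf_proto instance (mall_change returns
 * -EEXIST otherwise), so deletion just marks the head as dying and reports
 * the chain as empty; the actual teardown happens in mall_destroy().
 */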
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);

		return add && tc_skip_sw(head->flags) ? err : 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	return err;
}

static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_hw_stats_update(&head->exts, &cls_mall.stats, cls_mall.use_act_stats);
}

static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
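
/* Invoked by the qdisc layer when the class a filter result points at is
 * replaced or removed, so the class reference cached in head->res stays
 * consistent.
 */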
static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	tc_cls_bind_class(classid, cl, q, &head->res, base);
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_CLS("matchall");

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
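
/*
 * Illustrative userspace usage via iproute2 (see tc-matchall(8)); device
 * names here are examples only:
 *
 *   # mirror all ingress traffic on eth0 to eth1
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall \
 *           action mirred egress mirror dev eth1
 */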