// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Bootlin
 *
 */
#include "common.h"
#include "netlink.h"

#include <linux/phy.h>
#include <linux/phy_link_topology.h>
#include <linux/sfp.h>

struct phy_req_info {
	struct ethnl_req_info		base;
	struct phy_device_node		*pdn;
};

#define PHY_REQINFO(__req_base) \
	container_of(__req_base, struct phy_req_info, base)

const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
	[ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
};

/* Caller holds rtnl */
static ssize_t
ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
		     struct netlink_ext_ack *extack)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	size_t size = 0;

	ASSERT_RTNL();

	/* ETHTOOL_A_PHY_INDEX */
	size += nla_total_size(sizeof(u32));

	/* ETHTOOL_A_DRVNAME */
	if (phydev->drv)
		size += nla_total_size(strlen(phydev->drv->name) + 1);

	/* ETHTOOL_A_NAME */
	size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);

	/* ETHTOOL_A_PHY_UPSTREAM_TYPE */
	size += nla_total_size(sizeof(u32));

	if (phy_on_sfp(phydev)) {
		const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);

		/* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
		if (upstream_sfp_name)
			size += nla_total_size(strlen(upstream_sfp_name) + 1);

		/* ETHTOOL_A_PHY_UPSTREAM_INDEX */
		size += nla_total_size(sizeof(u32));
	}

	/* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name)
			size += nla_total_size(strlen(sfp_name) + 1);
	}

	return size;
}
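
/* Serialize one PHY's attributes (index, name, driver name, upstream type,
 * upstream PHY/SFP link and any downstream SFP bus name) into a reply
 * message. Caller holds rtnl.
 */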
static int
ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	enum phy_upstream ptype;

	ptype = pdn->upstream_type;

	if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
	    nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
	    nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
		return -EMSGSIZE;

	if (phydev->drv &&
	    nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
		return -EMSGSIZE;

	if (ptype == PHY_UPSTREAM_PHY) {
		struct phy_device *upstream = pdn->upstream.phydev;
		const char *sfp_upstream_name;

		/* Parent index */
		if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
			return -EMSGSIZE;

		if (pdn->parent_sfp_bus) {
			sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
			if (sfp_upstream_name &&
			    nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
					   sfp_upstream_name))
				return -EMSGSIZE;
		}
	}

	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name &&
		    nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
				   sfp_name))
			return -EMSGSIZE;
	}

	return 0;
}

static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
				   struct nlattr **tb,
				   struct netlink_ext_ack *extack)
{
	struct phy_link_topology *topo = req_base->dev->link_topo;
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device *phydev;

	phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER],
				      extack);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (!topo)
		return 0;

	req_info->pdn = xa_load(&topo->phys, phydev->phyindex);

	return 0;
}
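
/* GET request handler: resolve the target device and PHY, size and build a
 * single ETHTOOL_MSG_PHY_GET_REPLY, and send it back to the requester.
 */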
int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct phy_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info.base,
					 tb[ETHTOOL_A_PHY_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();

	ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;

	/* No PHY, return early */
	if (!req_info.pdn)
		goto err_unlock_rtnl;

	ret = ethnl_phy_reply_size(&req_info.base, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.base.dev,
				ETHTOOL_MSG_PHY_GET_REPLY,
				ETHTOOL_A_PHY_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_phy_fill_reply(&req_info.base, rskb);
	if (ret)
		goto err_free_msg;

	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	return ret;
}

struct ethnl_phy_dump_ctx {
	struct phy_req_info	*phy_req_info;
	unsigned long		ifindex;
	unsigned long		phy_index;
};

int ethnl_phy_start(struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
	if (!ctx->phy_req_info)
		return -ENOMEM;

	ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
					 info->attrs[ETHTOOL_A_PHY_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	ctx->ifindex = 0;
	ctx->phy_index = 0;

	if (ret)
		kfree(ctx->phy_req_info);

	return ret;
}

int ethnl_phy_done(struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;

	if (ctx->phy_req_info->base.dev)
		ethnl_parse_header_dev_put(&ctx->phy_req_info->base);

	kfree(ctx->phy_req_info);

	return 0;
}
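
/* Emit one ETHTOOL_MSG_PHY_GET_REPLY per PHY known to @dev's link topology,
 * resuming from ctx->phy_index. Caller holds rtnl.
 */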
static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
				  struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct phy_req_info *pri = ctx->phy_req_info;
	struct phy_device_node *pdn;
	int ret = 0;
	void *ehdr;

	if (!dev->link_topo)
		return 0;

	xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
		ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		pri->pdn = pdn;
		ret = ethnl_phy_fill_reply(&pri->base, skb);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		genlmsg_end(skb, ehdr);
	}

	return ret;
}

int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();

	if (ctx->phy_req_info->base.dev) {
		ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
	} else {
		for_each_netdev_dump(net, dev, ctx->ifindex) {
			ret = ethnl_phy_dump_one_dev(skb, dev, cb);
			if (ret)
				break;

			ctx->phy_index = 0;
		}
	}
	rtnl_unlock();

	return ret;
}