/*
 * br_vlan.c - bridge per-VLAN handling (filtering, pvid, per-vlan stats)
 */
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  8. const void *ptr)
  9. {
  10. const struct net_bridge_vlan *vle = ptr;
  11. u16 vid = *(u16 *)arg->key;
  12. return vle->vid != vid;
  13. }
/* Parameters for the per-group VLAN hash table: entries are
 * struct net_bridge_vlan, linked via ->vnode and keyed by the
 * 16-bit ->vid field (compared with br_vlan_cmp above).
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,		/* start small; grows on demand */
	.locks_mul = 1,
	.max_size = VLAN_N_VID,		/* bounded by the vid space */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
  24. static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  25. {
  26. return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  27. }
/* Make @vid the group's pvid (port vlan id) if it isn't already.
 * NOTE(review): the smp_wmb() orders prior vlan-entry initialization
 * before publishing the new pvid — presumably paired with a read
 * barrier in the pvid reader (br_get_pvid); confirm in br_private.h.
 */
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}
/* Clear the group's pvid, but only when it is currently @vid — callers
 * pass the vid they are removing so an unrelated pvid is left intact.
 * Barrier mirrors __vlan_add_pvid().
 */
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
  42. static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  43. {
  44. struct net_bridge_vlan_group *vg;
  45. if (br_vlan_is_master(v))
  46. vg = br_vlan_group(v->br);
  47. else
  48. vg = nbp_vlan_group(v->port);
  49. if (flags & BRIDGE_VLAN_INFO_PVID)
  50. __vlan_add_pvid(vg, v->vid);
  51. else
  52. __vlan_delete_pvid(vg, v->vid);
  53. if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  54. v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  55. else
  56. v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  57. }
  58. static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  59. u16 vid, u16 flags)
  60. {
  61. struct switchdev_obj_port_vlan v = {
  62. .obj.orig_dev = dev,
  63. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  64. .flags = flags,
  65. .vid_begin = vid,
  66. .vid_end = vid,
  67. };
  68. int err;
  69. /* Try switchdev op first. In case it is not supported, fallback to
  70. * 8021q add.
  71. */
  72. err = switchdev_port_obj_add(dev, &v.obj);
  73. if (err == -EOPNOTSUPP)
  74. return vlan_vid_add(dev, br->vlan_proto, vid);
  75. return err;
  76. }
/* Insert @v into its group's vlan_list, keeping the list sorted by
 * ascending vid. Walks from the tail since new vids are often the
 * largest. Uses list_add_rcu() so lockless readers may traverse the
 * list concurrently; writers hold RTNL (see ASSERT_RTNL() in callers
 * such as br_vlan_add()/nbp_vlan_add()).
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;	/* found first entry with vid <= v->vid */
	}
	list_add_rcu(&v->vlist, hpos);
}
/* Unlink @v from its group's sorted vlan_list (RCU-safe removal;
 * the entry itself is freed later via RCU by the callers).
 */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
  100. static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  101. u16 vid)
  102. {
  103. struct switchdev_obj_port_vlan v = {
  104. .obj.orig_dev = dev,
  105. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  106. .vid_begin = vid,
  107. .vid_end = vid,
  108. };
  109. int err;
  110. /* Try switchdev op first. In case it is not supported, fallback to
  111. * 8021q del.
  112. */
  113. err = switchdev_port_obj_del(dev, &v.obj);
  114. if (err == -EOPNOTSUPP) {
  115. vlan_vid_del(dev, br->vlan_proto, vid);
  116. return 0;
  117. }
  118. return err;
  119. }
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 * Returns NULL if the global context could not be created.
 * Called under RTNL (br_vlan_add() below asserts it).
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		/* we just added it, so it must be findable */
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}
  140. static void br_master_vlan_rcu_free(struct rcu_head *rcu)
  141. {
  142. struct net_bridge_vlan *v;
  143. v = container_of(rcu, struct net_bridge_vlan, rcu);
  144. WARN_ON(!br_vlan_is_master(v));
  145. free_percpu(v->stats);
  146. v->stats = NULL;
  147. kfree(v);
  148. }
/* Drop a reference on master vlan @masterv; when the last reference
 * goes away, unlink it from the bridge's hash/list and free it after a
 * grace period. A non-master argument is silently ignored (port vlans
 * are not refcounted this way).
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		/* readers may still hold the entry; defer the free */
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
  162. /* This is the shared VLAN add function which works for both ports and bridge
  163. * devices. There are four possible calls to this function in terms of the
  164. * vlan entry type:
  165. * 1. vlan is being added on a port (no master flags, global entry exists)
  166. * 2. vlan is being added on a bridge (both master and brentry flags)
  167. * 3. vlan is being added on a port, but a global entry didn't exist which
  168. * is being created right now (master flag set, brentry flag unset), the
  169. * global entry is used for global per-vlan features, but not for filtering
  170. * 4. same as 3 but with both master and brentry flags set so the entry
  171. * will be used for filtering in both the port and the bridge
  172. */
  173. static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
  174. {
  175. struct net_bridge_vlan *masterv = NULL;
  176. struct net_bridge_port *p = NULL;
  177. struct net_bridge_vlan_group *vg;
  178. struct net_device *dev;
  179. struct net_bridge *br;
  180. int err;
  181. if (br_vlan_is_master(v)) {
  182. br = v->br;
  183. dev = br->dev;
  184. vg = br_vlan_group(br);
  185. } else {
  186. p = v->port;
  187. br = p->br;
  188. dev = p->dev;
  189. vg = nbp_vlan_group(p);
  190. }
  191. if (p) {
  192. /* Add VLAN to the device filter if it is supported.
  193. * This ensures tagged traffic enters the bridge when
  194. * promiscuous mode is disabled by br_manage_promisc().
  195. */
  196. err = __vlan_vid_add(dev, br, v->vid, flags);
  197. if (err)
  198. goto out;
  199. /* need to work on the master vlan too */
  200. if (flags & BRIDGE_VLAN_INFO_MASTER) {
  201. err = br_vlan_add(br, v->vid, flags |
  202. BRIDGE_VLAN_INFO_BRENTRY);
  203. if (err)
  204. goto out_filt;
  205. }
  206. masterv = br_vlan_get_master(br, v->vid);
  207. if (!masterv)
  208. goto out_filt;
  209. v->brvlan = masterv;
  210. v->stats = masterv->stats;
  211. }
  212. /* Add the dev mac and count the vlan only if it's usable */
  213. if (br_vlan_should_use(v)) {
  214. err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
  215. if (err) {
  216. br_err(br, "failed insert local address into bridge forwarding table\n");
  217. goto out_filt;
  218. }
  219. vg->num_vlans++;
  220. }
  221. err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
  222. br_vlan_rht_params);
  223. if (err)
  224. goto out_fdb_insert;
  225. __vlan_add_list(v);
  226. __vlan_add_flags(v, flags);
  227. out:
  228. return err;
  229. out_fdb_insert:
  230. if (br_vlan_should_use(v)) {
  231. br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
  232. vg->num_vlans--;
  233. }
  234. out_filt:
  235. if (p) {
  236. __vlan_vid_del(dev, br, v->vid);
  237. if (masterv) {
  238. br_vlan_put_master(masterv);
  239. v->brvlan = NULL;
  240. }
  241. }
  242. goto out;
  243. }
/* Shared delete for both bridge (master) and port vlan entries.
 * For a port entry: removes the vid from the device filter, unlinks
 * and frees the entry, then drops the reference it held on its master.
 * For a master entry: only clears the BRENTRY usage here; the entry
 * itself is freed when the last reference is dropped by
 * br_vlan_put_master() below (masterv == v in that case, so the
 * unlink/kfree branch is skipped).
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		/* port vlan: remove from this port's group and free */
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Free a vlan group after it has been unpublished and all entries have
 * been deleted (the list must already be empty).
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}
/* Delete every vlan entry in @vg and clear its pvid. Uses the _safe
 * iterator because __vlan_del() unlinks entries as it goes.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
/* Egress vlan handling: account tx stats and strip the tag when the
 * vlan is configured untagged. Returns the skb to transmit, or NULL if
 * the frame was dropped (skb consumed).
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* untagged vlan: drop the hwaccel tag before transmit */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;
out:
	return skb;
}
/* Called under RCU.
 * Ingress vlan filtering: determine the frame's vid (handling untagged,
 * priority-tagged and protocol-mismatched frames via the pvid), verify
 * the vid is configured on this port, and account rx stats. On success
 * returns true with *vid set; on drop, frees the skb and returns false.
 * NOTE(review): skb may be reallocated by skb_vlan_untag()/
 * vlan_insert_tag_set_proto() — only the local pointer is updated,
 * which matches how the upstream callers use it; confirm if reusing.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br->vlan_stats_enabled)
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;
drop:
	kfree_skb(skb);
	return false;
}
  411. bool br_allowed_ingress(const struct net_bridge *br,
  412. struct net_bridge_vlan_group *vg, struct sk_buff *skb,
  413. u16 *vid)
  414. {
  415. /* If VLAN filtering is disabled on the bridge, all packets are
  416. * permitted.
  417. */
  418. if (!br->vlan_enabled) {
  419. BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
  420. return true;
  421. }
  422. return __allowed_ingress(br, vg, skb, vid);
  423. }
  424. /* Called under RCU. */
  425. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  426. const struct sk_buff *skb)
  427. {
  428. const struct net_bridge_vlan *v;
  429. u16 vid;
  430. /* If this packet was not filtered at input, let it pass */
  431. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  432. return true;
  433. br_vlan_get_tag(skb, &vid);
  434. v = br_vlan_find(vg, vid);
  435. if (v && br_vlan_should_use(v))
  436. return true;
  437. return false;
  438. }
  439. /* Called under RCU */
  440. bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
  441. {
  442. struct net_bridge_vlan_group *vg;
  443. struct net_bridge *br = p->br;
  444. /* If filtering was disabled at input, let it pass. */
  445. if (!br->vlan_enabled)
  446. return true;
  447. vg = nbp_vlan_group_rcu(p);
  448. if (!vg || !vg->num_vlans)
  449. return false;
  450. if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
  451. *vid = 0;
  452. if (!*vid) {
  453. *vid = br_get_pvid(vg);
  454. if (!*vid)
  455. return false;
  456. return true;
  457. }
  458. if (br_vlan_find(vg, *vid))
  459. return true;
  460. return false;
  461. }
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Add (or update) a bridge-global vlan entry. If a master entry already
 * exists only for port vlans, BRIDGE_VLAN_INFO_BRENTRY promotes it to a
 * real bridge entry; otherwise a new master entry is allocated.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			/* the bridge itself now also holds a reference */
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* pvid is tracked in the group, not via the entry's flags */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	}

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Delete the bridge-global entry for @vid along with the bridge's local
 * fdb entry and any fdb entries learned on that vlan.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	return __vlan_del(v);
}
/* Tear down the bridge's vlan group: delete all entries, unpublish the
 * group pointer, wait for RCU readers to finish, then free the group.
 * The flush happens before unpublishing, so __vlan_del() still finds
 * the group through br_vlan_group().
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  539. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  540. {
  541. if (!vg)
  542. return NULL;
  543. return br_vlan_lookup(&vg->vlan_hash, vid);
  544. }
  545. /* Must be protected by RTNL. */
  546. static void recalculate_group_addr(struct net_bridge *br)
  547. {
  548. if (br->group_addr_set)
  549. return;
  550. spin_lock_bh(&br->lock);
  551. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
  552. /* Bridge Group Address */
  553. br->group_addr[5] = 0x00;
  554. } else { /* vlan_enabled && ETH_P_8021AD */
  555. /* Provider Bridge Group Address */
  556. br->group_addr[5] = 0x08;
  557. }
  558. spin_unlock_bh(&br->lock);
  559. }
/* Must be protected by RTNL.
 * Recompute which link-local group addresses must not be forwarded.
 * In 802.1ad mode the bridge's own group address bit is excluded from
 * the mandatory-filter mask.
 */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}
/* Toggle vlan filtering on the bridge. Offloads the setting via
 * switchdev (EOPNOTSUPP from a non-offloading driver is not an error),
 * then updates promiscuity, the group address and the forward mask
 * which all depend on vlan_enabled.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Netlink/sysfs entry point for toggling vlan filtering. */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
/* Switch the bridge vlan protocol (802.1Q <-> 802.1ad).
 * Two-phase update: first program every configured vid into each port's
 * device filter under the NEW proto; only if all succeed, flip the
 * proto and remove the OLD proto filters. On any failure the partially
 * added new-proto filters are unwound in reverse order.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* unwind the partially-filled port, then all fully-done ports */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  632. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  633. {
  634. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  635. return -EPROTONOSUPPORT;
  636. return __br_vlan_set_proto(br, htons(val));
  637. }
  638. int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
  639. {
  640. switch (val) {
  641. case 0:
  642. case 1:
  643. br->vlan_stats_enabled = val;
  644. break;
  645. default:
  646. return -EINVAL;
  647. }
  648. return 0;
  649. }
  650. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  651. {
  652. struct net_bridge_vlan *v;
  653. if (vid != vg->pvid)
  654. return false;
  655. v = br_vlan_lookup(&vg->vlan_hash, vid);
  656. if (v && br_vlan_should_use(v) &&
  657. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  658. return true;
  659. return false;
  660. }
/* Turn off the default_pvid feature: remove the auto-installed pvid
 * entry from the bridge and every port, but only where it still looks
 * like our default configuration (user-modified entries are kept).
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
/* Change the bridge's default pvid to @pvid (0 disables the feature).
 * The new pvid entry is installed on the bridge and on every port whose
 * pvid configuration is still the untouched default; a per-port bitmap
 * tracks which ones were changed so a mid-way failure can be rolled
 * back to the old pvid.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;	/* bit N set => port N was updated */

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);	/* bit 0 records the bridge itself */
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port we already switched */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
  750. int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
  751. {
  752. u16 pvid = val;
  753. int err = 0;
  754. if (val >= VLAN_VID_MASK)
  755. return -EINVAL;
  756. if (pvid == br->default_pvid)
  757. goto out;
  758. /* Only allow default pvid change when filtering is disabled */
  759. if (br->vlan_enabled) {
  760. pr_info_once("Please disable vlan filtering to change default_pvid\n");
  761. err = -EPERM;
  762. goto out;
  763. }
  764. err = __br_vlan_set_default_pvid(br, pvid);
  765. out:
  766. return err;
  767. }
  768. int br_vlan_init(struct net_bridge *br)
  769. {
  770. struct net_bridge_vlan_group *vg;
  771. int ret = -ENOMEM;
  772. vg = kzalloc(sizeof(*vg), GFP_KERNEL);
  773. if (!vg)
  774. goto out;
  775. ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
  776. if (ret)
  777. goto err_rhtbl;
  778. INIT_LIST_HEAD(&vg->vlan_list);
  779. br->vlan_proto = htons(ETH_P_8021Q);
  780. br->default_pvid = 1;
  781. rcu_assign_pointer(br->vlgrp, vg);
  782. ret = br_vlan_add(br, 1,
  783. BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
  784. BRIDGE_VLAN_INFO_BRENTRY);
  785. if (ret)
  786. goto err_vlan_add;
  787. out:
  788. return ret;
  789. err_vlan_add:
  790. rhashtable_destroy(&vg->vlan_hash);
  791. err_rhtbl:
  792. kfree(vg);
  793. goto out;
  794. }
/* Set up the per-port vlan context when a port joins the bridge:
 * propagate the bridge's vlan_filtering setting to the port driver via
 * switchdev (EOPNOTSUPP tolerated), allocate/publish the port's vlan
 * group and install the bridge's default pvid on the port.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* group was published; unpublish before destroying it */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	rhashtable_destroy(&vg->vlan_hash);
err_vlan_enabled:
err_rhtbl:
	kfree(vg);

	goto out;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Add @vid to port @port. If the entry already exists, only the flags
 * are refreshed (and forwarded to an offloading driver); otherwise a
 * new port vlan is created via the shared __vlan_add() path.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = port->dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = switchdev_port_obj_add(port->dev, &v.obj);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	/* no stats allocation here: port vlans share the master's stats,
	 * wired up inside __vlan_add()
	 */
	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Remove @vid from port @port together with the port's local fdb entry
 * and all fdb entries learned on that vlan.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}
/* Tear down a port's vlan group when the port leaves the bridge:
 * delete all entries, unpublish the group, wait for RCU readers, free.
 * Mirrors br_vlan_flush() for the bridge device.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
/* Sum the per-cpu rx/tx counters of vlan @v into @stats.
 * Each per-cpu snapshot is read under the u64_stats seqcount retry
 * loop so 64-bit counters are consistent on 32-bit hosts.
 */
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}