From 24d6854622d92bbf3c01365ab5b4b1d30d50d720 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Tue, 24 Sep 2024 19:57:46 +0000 Subject: [PATCH 01/11] zebra,pimd,lib: Modify ZEBRA_NEXTHOP_LOOKUP_MRIB Modified ZEBRA_NEXTHOP_LOOKUP_MRIB to include the SAFI from which to do the lookup. This generalizes the API away from MRIB specifically and allows the user to decide how it should do lookups. Rename ZEBRA_NEXTHOP_LOOKUP_MRIB to ZEBRA_NEXTHOP_LOOKUP now that it is more generalized. This change is in preperation to remove multicast lookup mode completely from zebra. Signed-off-by: Nathan Bahr --- lib/log.c | 2 +- lib/zclient.h | 2 +- pimd/pim_zlookup.c | 5 ++-- zebra/rib.h | 6 +---- zebra/zapi_msg.c | 36 ++++++++++++++++++--------- zebra/zebra_rib.c | 61 +--------------------------------------------- 6 files changed, 31 insertions(+), 81 deletions(-) diff --git a/lib/log.c b/lib/log.c index 2b049cebe41e..bc1ed5c5ccae 100644 --- a/lib/log.c +++ b/lib/log.c @@ -358,7 +358,7 @@ static const struct zebra_desc_table command_types[] = { DESC_ENTRY(ZEBRA_BFD_CLIENT_DEREGISTER), DESC_ENTRY(ZEBRA_INTERFACE_ENABLE_RADV), DESC_ENTRY(ZEBRA_INTERFACE_DISABLE_RADV), - DESC_ENTRY(ZEBRA_NEXTHOP_LOOKUP_MRIB), + DESC_ENTRY(ZEBRA_NEXTHOP_LOOKUP), DESC_ENTRY(ZEBRA_INTERFACE_LINK_PARAMS), DESC_ENTRY(ZEBRA_MPLS_LABELS_ADD), DESC_ENTRY(ZEBRA_MPLS_LABELS_DELETE), diff --git a/lib/zclient.h b/lib/zclient.h index 6da9558aa560..2385a8a2197b 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -131,7 +131,7 @@ typedef enum { ZEBRA_BFD_CLIENT_DEREGISTER, ZEBRA_INTERFACE_ENABLE_RADV, ZEBRA_INTERFACE_DISABLE_RADV, - ZEBRA_NEXTHOP_LOOKUP_MRIB, + ZEBRA_NEXTHOP_LOOKUP, ZEBRA_INTERFACE_LINK_PARAMS, ZEBRA_MPLS_LABELS_ADD, ZEBRA_MPLS_LABELS_DELETE, diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c index 5d344f1f66ba..e2644c531bab 100644 --- a/pimd/pim_zlookup.c +++ b/pimd/pim_zlookup.c @@ -162,7 +162,7 @@ static int zclient_read_nexthop(struct pim_instance *pim, s = zlookup->ibuf; - while (command != 
ZEBRA_NEXTHOP_LOOKUP_MRIB) { + while (command != ZEBRA_NEXTHOP_LOOKUP) { stream_reset(s); err = zclient_read_header(s, zlookup->sock, &length, &marker, &version, &vrf_id, &command); @@ -337,8 +337,9 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim, s = zlookup->obuf; stream_reset(s); - zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, pim->vrf->vrf_id); + zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, pim->vrf->vrf_id); stream_put_ipaddr(s, &ipaddr); + stream_putc(s, SAFI_MULTICAST); // TODO NEB Set the real safi stream_putw_at(s, 0, stream_get_endp(s)); ret = writen(zlookup->sock, s->data, stream_get_endp(s)); diff --git a/zebra/rib.h b/zebra/rib.h index 5fedb07335ef..8484fe1291a2 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -402,11 +402,7 @@ extern void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, bool fromkernel); extern struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id, - const union g_addr *addr, - struct route_node **rn_out); -extern struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id, - union g_addr *gaddr, - struct route_node **rn_out); + const union g_addr *addr, struct route_node **rn_out); extern void rib_update(enum rib_update_event event); extern void rib_update_table(struct route_table *table, diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index 10acee9be435..4a22e7ca1bab 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -640,10 +640,15 @@ int zsend_redistribute_route(int cmd, struct zserv *client, * (Otherwise we would need to implement sending NHT updates for the result of * this "URIB-MRIB-combined" table, but we only decide that here on the fly, * so it'd be rather complex to do NHT for.) + * + * 9/19/24 NEB I've updated this API to include the SAFI in the lookup + * request and response. This allows PIM to do a syncronous lookup for the + * correct table along side NHT. + * This also makes this a more generic synchronous lookup not specifically + * tied to the mrib. 
*/ -static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr, - struct route_entry *re, - struct zebra_vrf *zvrf) +static int zsend_nexthop_lookup(struct zserv *client, struct ipaddr *addr, struct route_entry *re, + struct route_node *rn, struct zebra_vrf *zvrf, safi_t safi) { struct stream *s; unsigned long nump; @@ -655,14 +660,16 @@ static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr, stream_reset(s); /* Fill in result. */ - zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, zvrf_id(zvrf)); + zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, zvrf_id(zvrf)); stream_put_ipaddr(s, addr); - if (re) { + if (re && rn) { struct nexthop_group *nhg; stream_putc(s, re->distance); stream_putl(s, re->metric); + stream_putw(s, rn->p.prefixlen); + num = 0; /* remember position for nexthop_num */ nump = stream_get_endp(s); @@ -679,6 +686,7 @@ static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr, } else { stream_putc(s, 0); /* distance */ stream_putl(s, 0); /* metric */ + stream_putw(s, 0); /* prefix len */ stream_putw(s, 0); /* nexthop_num */ } @@ -2316,33 +2324,37 @@ static void zread_route_del(ZAPI_HANDLER_ARGS) } } -/* MRIB Nexthop lookup for IPv4. */ -static void zread_nexthop_lookup_mrib(ZAPI_HANDLER_ARGS) +/* Syncronous Nexthop lookup. */ +static void zread_nexthop_lookup(ZAPI_HANDLER_ARGS) { struct ipaddr addr; struct route_entry *re = NULL; + struct route_node *rn = NULL; union g_addr gaddr; + afi_t afi = AFI_IP; + safi_t safi = SAFI_UNICAST; STREAM_GET_IPADDR(msg, &addr); + STREAM_GETC(msg, safi); switch (addr.ipa_type) { case IPADDR_V4: gaddr.ipv4 = addr.ipaddr_v4; - re = rib_match_multicast(AFI_IP, zvrf_id(zvrf), &gaddr, NULL); + afi = AFI_IP; break; case IPADDR_V6: gaddr.ipv6 = addr.ipaddr_v6; - re = rib_match_multicast(AFI_IP6, zvrf_id(zvrf), &gaddr, NULL); + afi = AFI_IP6; break; case IPADDR_NONE: /* ??? 
*/ goto stream_failure; } - zsend_nexthop_lookup_mrib(client, &addr, re, zvrf); + re = rib_match(afi, safi, zvrf_id(zvrf), &gaddr, &rn); stream_failure: - return; + zsend_nexthop_lookup(client, &addr, re, rn, zvrf, safi); } /* Register zebra server router-id information. Send current router-id */ @@ -4029,7 +4041,7 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = { [ZEBRA_REDISTRIBUTE_DELETE] = zebra_redistribute_delete, [ZEBRA_REDISTRIBUTE_DEFAULT_ADD] = zebra_redistribute_default_add, [ZEBRA_REDISTRIBUTE_DEFAULT_DELETE] = zebra_redistribute_default_delete, - [ZEBRA_NEXTHOP_LOOKUP_MRIB] = zread_nexthop_lookup_mrib, + [ZEBRA_NEXTHOP_LOOKUP] = zread_nexthop_lookup, [ZEBRA_HELLO] = zread_hello, [ZEBRA_NEXTHOP_REGISTER] = zread_rnh_register, [ZEBRA_NEXTHOP_UNREGISTER] = zread_rnh_unregister, diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index aea39b8ecf18..87bfdf49e13a 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -503,7 +503,7 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id, /* Lookup table. 
*/ table = zebra_vrf_table(afi, safi, vrf_id); if (!table) - return 0; + return NULL; memset(&p, 0, sizeof(p)); p.family = afi; @@ -552,65 +552,6 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id, return NULL; } -struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id, - union g_addr *gaddr, - struct route_node **rn_out) -{ - struct route_entry *re = NULL, *mre = NULL, *ure = NULL; - struct route_node *m_rn = NULL, *u_rn = NULL; - - switch (zrouter.ipv4_multicast_mode) { - case MCAST_MRIB_ONLY: - return rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, rn_out); - case MCAST_URIB_ONLY: - return rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, rn_out); - case MCAST_NO_CONFIG: - case MCAST_MIX_MRIB_FIRST: - re = mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn); - if (!mre) - re = ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, - &u_rn); - break; - case MCAST_MIX_DISTANCE: - mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn); - ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, &u_rn); - if (mre && ure) - re = ure->distance < mre->distance ? ure : mre; - else if (mre) - re = mre; - else if (ure) - re = ure; - break; - case MCAST_MIX_PFXLEN: - mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn); - ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, &u_rn); - if (mre && ure) - re = u_rn->p.prefixlen > m_rn->p.prefixlen ? ure : mre; - else if (mre) - re = mre; - else if (ure) - re = ure; - break; - } - - if (rn_out) - *rn_out = (re == mre) ? m_rn : u_rn; - - if (IS_ZEBRA_DEBUG_RIB) { - char buf[BUFSIZ]; - inet_ntop(afi == AFI_IP ? AF_INET : AF_INET6, gaddr, buf, - BUFSIZ); - - zlog_debug("%s: %s: %pRN vrf: %s(%u) found %s, using %s", - __func__, buf, (re == mre) ? m_rn : u_rn, - vrf_id_to_name(vrf_id), vrf_id, - mre ? (ure ? "MRIB+URIB" : "MRIB") - : ure ? "URIB" : "nothing", - re == ure ? "URIB" : re == mre ? "MRIB" : "none"); - } - return re; -} - /* * Is this RIB labeled-unicast? 
It must be of type BGP and all paths * (nexthops) must have a label. From bac759ad11e0153ee3acc406d5d4da2d61529939 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Tue, 24 Sep 2024 20:13:35 +0000 Subject: [PATCH 02/11] zebra,yang: Completely remove multicast mode from zebra Multicast mode belongs in PIM, so removing it completely from zebra. Modified `show (ip|ipv6) rpf ADDRESS` to always lookup from SAFI_MULTICAST. This means this command is now specific to the multicast table and does not necessarily reflect the PIM RPF lookup, but that should be implemented in PIM instead. Signed-off-by: Nathan Bahr --- yang/frr-zebra.yang | 47 -------------------------- zebra/zebra_nb.c | 6 ---- zebra/zebra_nb_config.c | 17 ---------- zebra/zebra_router.c | 14 -------- zebra/zebra_router.h | 18 ---------- zebra/zebra_vty.c | 75 ++--------------------------------------- 6 files changed, 2 insertions(+), 175 deletions(-) diff --git a/yang/frr-zebra.yang b/yang/frr-zebra.yang index f97a4cc129be..a3c066c56c14 100644 --- a/yang/frr-zebra.yang +++ b/yang/frr-zebra.yang @@ -157,47 +157,6 @@ module frr-zebra { "Zebra interface type gre."; } - /* - * Multicast RPF mode configurable type - */ - - typedef mcast-rpf-lookup-mode { - type enumeration { - enum "none" { - value 0; - description - "No mode set."; - } - enum "mrib-only" { - value 1; - description - "Lookup in unicast RIB only."; - } - enum "urib-only" { - value 2; - description - "Lookup in multicast RIB only."; - } - enum "mrib-then-urib" { - value 3; - description - "Try multicast RIB first, fall back to unicast RIB."; - } - enum "lower-distance" { - value 4; - description - "Lookup both unicast and mcast, use entry with lower distance."; - } - enum "longer-prefix" { - value 5; - description - "Lookup both unicast and mcast, use entry with longer prefix."; - } - } - description - "Multicast RPF lookup behavior"; - } - // End of ip6-route /* * VxLAN Network Identifier type @@ -2883,12 +2842,6 @@ module frr-zebra { container 
zebra { description "Data model for the Zebra daemon."; - leaf mcast-rpf-lookup { - type frr-zebra:mcast-rpf-lookup-mode; - default "mrib-then-urib"; - description - "Multicast RPF lookup behavior."; - } leaf ip-forwarding { type boolean; description diff --git a/zebra/zebra_nb.c b/zebra/zebra_nb.c index 0a7ed5db41f0..6b41993a9591 100644 --- a/zebra/zebra_nb.c +++ b/zebra/zebra_nb.c @@ -25,12 +25,6 @@ const struct frr_yang_module_info frr_zebra_info = { .name = "frr-zebra", .features = features, .nodes = { - { - .xpath = "/frr-zebra:zebra/mcast-rpf-lookup", - .cbs = { - .modify = zebra_mcast_rpf_lookup_modify, - } - }, { .xpath = "/frr-zebra:zebra/ip-forwarding", .cbs = { diff --git a/zebra/zebra_nb_config.c b/zebra/zebra_nb_config.c index 09c0091ec693..ec151360bd1a 100644 --- a/zebra/zebra_nb_config.c +++ b/zebra/zebra_nb_config.c @@ -30,23 +30,6 @@ #include "zebra/zebra_rnh.h" #include "zebra/table_manager.h" -/* - * XPath: /frr-zebra:zebra/mcast-rpf-lookup - */ -int zebra_mcast_rpf_lookup_modify(struct nb_cb_modify_args *args) -{ - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. 
*/ - break; - } - - return NB_OK; -} - /* * XPath: /frr-zebra:zebra/ip-forwarding */ diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c index 4022c1a26fc6..ae2910af410a 100644 --- a/zebra/zebra_router.c +++ b/zebra/zebra_router.c @@ -23,7 +23,6 @@ DEFINE_MTYPE_STATIC(ZEBRA, ZEBRA_RT_TABLE, "Zebra VRF table"); struct zebra_router zrouter = { .multipath_num = MULTIPATH_NUM, - .ipv4_multicast_mode = MCAST_NO_CONFIG, }; static inline int @@ -221,19 +220,6 @@ uint32_t zebra_router_get_next_sequence(void) memory_order_relaxed); } -void multicast_mode_ipv4_set(enum multicast_mode mode) -{ - if (IS_ZEBRA_DEBUG_RIB) - zlog_debug("%s: multicast lookup mode set (%d)", __func__, - mode); - zrouter.ipv4_multicast_mode = mode; -} - -enum multicast_mode multicast_mode_ipv4_get(void) -{ - return zrouter.ipv4_multicast_mode; -} - void zebra_router_terminate(void) { struct zebra_router_table *zrt, *tmp; diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h index c86c6be1ef56..6b6b2787ac71 100644 --- a/zebra/zebra_router.h +++ b/zebra/zebra_router.h @@ -34,17 +34,6 @@ RB_HEAD(zebra_router_table_head, zebra_router_table); RB_PROTOTYPE(zebra_router_table_head, zebra_router_table, zebra_router_table_entry, zebra_router_table_entry_compare) -/* RPF lookup behaviour */ -enum multicast_mode { - MCAST_NO_CONFIG = 0, /* MIX_MRIB_FIRST, but no show in config write */ - MCAST_MRIB_ONLY, /* MRIB only */ - MCAST_URIB_ONLY, /* URIB only */ - MCAST_MIX_MRIB_FIRST, /* MRIB, if nothing at all then URIB */ - MCAST_MIX_DISTANCE, /* MRIB & URIB, lower distance wins */ - MCAST_MIX_PFXLEN, /* MRIB & URIB, longer prefix wins */ - /* on equal value, MRIB wins for last 2 */ -}; - /* An interface can be error-disabled if a protocol (such as EVPN or * VRRP) detects a problem with keeping it operationally-up. 
* If any of the protodown bits are set protodown-on is programmed @@ -187,9 +176,6 @@ struct zebra_router { uint32_t multipath_num; - /* RPF Lookup behavior */ - enum multicast_mode ipv4_multicast_mode; - /* * zebra start time and time of sweeping RIB of old routes */ @@ -287,10 +273,6 @@ static inline struct zebra_vrf *zebra_vrf_get_evpn(void) : zebra_vrf_lookup_by_id(VRF_DEFAULT); } -extern void multicast_mode_ipv4_set(enum multicast_mode mode); - -extern enum multicast_mode multicast_mode_ipv4_get(void); - extern bool zebra_router_notify_on_ack(void); static inline void zebra_router_set_supports_nhgs(bool support) diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 309cde9a35b2..978134161933 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -88,56 +88,6 @@ static void show_ip_route_nht_dump(struct vty *vty, const struct route_entry *re, unsigned int num); -DEFUN (ip_multicast_mode, - ip_multicast_mode_cmd, - "ip multicast rpf-lookup-mode ", - IP_STR - "Multicast options\n" - "RPF lookup behavior\n" - "Lookup in unicast RIB only\n" - "Lookup in multicast RIB only\n" - "Try multicast RIB first, fall back to unicast RIB\n" - "Lookup both, use entry with lower distance\n" - "Lookup both, use entry with longer prefix\n") -{ - char *mode = argv[3]->text; - - if (strmatch(mode, "urib-only")) - multicast_mode_ipv4_set(MCAST_URIB_ONLY); - else if (strmatch(mode, "mrib-only")) - multicast_mode_ipv4_set(MCAST_MRIB_ONLY); - else if (strmatch(mode, "mrib-then-urib")) - multicast_mode_ipv4_set(MCAST_MIX_MRIB_FIRST); - else if (strmatch(mode, "lower-distance")) - multicast_mode_ipv4_set(MCAST_MIX_DISTANCE); - else if (strmatch(mode, "longer-prefix")) - multicast_mode_ipv4_set(MCAST_MIX_PFXLEN); - else { - vty_out(vty, "Invalid mode specified\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - return CMD_SUCCESS; -} - -DEFUN (no_ip_multicast_mode, - no_ip_multicast_mode_cmd, - "no ip multicast rpf-lookup-mode []", - NO_STR - IP_STR - "Multicast options\n" - "RPF 
lookup behavior\n" - "Lookup in unicast RIB only\n" - "Lookup in multicast RIB only\n" - "Try multicast RIB first, fall back to unicast RIB\n" - "Lookup both, use entry with lower distance\n" - "Lookup both, use entry with longer prefix\n") -{ - multicast_mode_ipv4_set(MCAST_NO_CONFIG); - return CMD_SUCCESS; -} - - DEFPY (show_ip_rpf, show_ip_rpf_cmd, "show [ip$ip|ipv6$ipv6] rpf [json]", @@ -168,8 +118,7 @@ DEFPY (show_ip_rpf_addr, struct route_node *rn; struct route_entry *re; - re = rib_match_multicast(AFI_IP, VRF_DEFAULT, (union g_addr *)&address, - &rn); + re = rib_match(AFI_IP, SAFI_MULTICAST, VRF_DEFAULT, (union g_addr *)&address, &rn); if (re) vty_show_ip_route_detail(vty, rn, 1, false, false); @@ -190,8 +139,7 @@ DEFPY (show_ipv6_rpf_addr, struct route_node *rn; struct route_entry *re; - re = rib_match_multicast(AFI_IP6, VRF_DEFAULT, (union g_addr *)&address, - &rn); + re = rib_match(AFI_IP6, SAFI_MULTICAST, VRF_DEFAULT, (union g_addr *)&address, &rn); if (re) vty_show_ip_route_detail(vty, rn, 1, false, false); @@ -3766,22 +3714,6 @@ static int config_write_protocol(struct vty *vty) vty_out(vty, "zebra zapi-packets %u\n", zrouter.packets_to_process); - enum multicast_mode ipv4_multicast_mode = multicast_mode_ipv4_get(); - - if (ipv4_multicast_mode != MCAST_NO_CONFIG) - vty_out(vty, "ip multicast rpf-lookup-mode %s\n", - ipv4_multicast_mode == MCAST_URIB_ONLY - ? "urib-only" - : ipv4_multicast_mode == MCAST_MRIB_ONLY - ? "mrib-only" - : ipv4_multicast_mode - == MCAST_MIX_MRIB_FIRST - ? "mrib-then-urib" - : ipv4_multicast_mode - == MCAST_MIX_DISTANCE - ? 
"lower-distance" - : "longer-prefix"); - /* Include dataplane info */ dplane_config_write_helper(vty); @@ -4365,9 +4297,6 @@ void zebra_vty_init(void) install_element(CONFIG_NODE, &allow_external_route_update_cmd); install_element(CONFIG_NODE, &no_allow_external_route_update_cmd); - install_element(CONFIG_NODE, &ip_multicast_mode_cmd); - install_element(CONFIG_NODE, &no_ip_multicast_mode_cmd); - install_element(CONFIG_NODE, &zebra_nexthop_group_keep_cmd); install_element(CONFIG_NODE, &ip_zebra_import_table_distance_cmd); install_element(CONFIG_NODE, &no_ip_zebra_import_table_cmd); From 42514432de5db5a31a14cf8f8b6c12c7f0f7b1ee Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Tue, 29 Oct 2024 19:30:00 +0000 Subject: [PATCH 03/11] zebra: Improve multicast safi route show commands Add `mrib` flag to existing "show ip route" commands which then use the multicast safi rather than the unicast safi. Updated the vty output to include the AFI and SAFI string when printing the table. Deprecate `show ip rpf` command, aliased to `show ip route mrib`. Removed `show ip rpf A.B.C.D`. 
Signed-off-by: Nathan Bahr --- zebra/zebra_vty.c | 324 +++++++++++++++++++--------------------------- 1 file changed, 135 insertions(+), 189 deletions(-) diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 978134161933..4d1af2ee8606 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -81,74 +81,14 @@ static void show_nexthop_detail_helper(struct vty *vty, const struct nexthop *nexthop, bool is_backup); -static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table); +static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table, afi_t afi, + safi_t safi); static void show_ip_route_nht_dump(struct vty *vty, const struct nexthop *nexthop, const struct route_node *rn, const struct route_entry *re, unsigned int num); -DEFPY (show_ip_rpf, - show_ip_rpf_cmd, - "show [ip$ip|ipv6$ipv6] rpf [json]", - SHOW_STR - IP_STR - IPV6_STR - "Display RPF information for multicast source\n" - JSON_STR) -{ - bool uj = use_json(argc, argv); - struct route_show_ctx ctx = { - .multi = false, - }; - - return do_show_ip_route(vty, VRF_DEFAULT_NAME, ip ? AFI_IP : AFI_IP6, - SAFI_MULTICAST, false, uj, 0, NULL, false, 0, 0, - 0, false, &ctx); -} - -DEFPY (show_ip_rpf_addr, - show_ip_rpf_addr_cmd, - "show ip rpf A.B.C.D$address", - SHOW_STR - IP_STR - "Display RPF information for multicast source\n" - "IP multicast source address (e.g. 
10.0.0.0)\n") -{ - struct route_node *rn; - struct route_entry *re; - - re = rib_match(AFI_IP, SAFI_MULTICAST, VRF_DEFAULT, (union g_addr *)&address, &rn); - - if (re) - vty_show_ip_route_detail(vty, rn, 1, false, false); - else - vty_out(vty, "%% No match for RPF lookup\n"); - - return CMD_SUCCESS; -} - -DEFPY (show_ipv6_rpf_addr, - show_ipv6_rpf_addr_cmd, - "show ipv6 rpf X:X::X:X$address", - SHOW_STR - IPV6_STR - "Display RPF information for multicast source\n" - "IPv6 multicast source address\n") -{ - struct route_node *rn; - struct route_entry *re; - - re = rib_match(AFI_IP6, SAFI_MULTICAST, VRF_DEFAULT, (union g_addr *)&address, &rn); - - if (re) - vty_show_ip_route_detail(vty, rn, 1, false, false); - else - vty_out(vty, "%% No match for RPF lookup\n"); - - return CMD_SUCCESS; -} - static char re_status_output_char(const struct route_entry *re, const struct nexthop *nhop, bool is_fib) @@ -806,14 +746,11 @@ static void vty_show_ip_route_detail_json(struct vty *vty, vty_json(vty, json); } -static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf, - struct route_table *table, afi_t afi, - bool use_fib, route_tag_t tag, - const struct prefix *longer_prefix_p, - bool supernets_only, int type, - unsigned short ospf_instance_id, bool use_json, - uint32_t tableid, bool show_ng, - struct route_show_ctx *ctx) +static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf, struct route_table *table, + afi_t afi, safi_t safi, bool use_fib, route_tag_t tag, + const struct prefix *longer_prefix_p, bool supernets_only, + int type, unsigned short ospf_instance_id, bool use_json, + uint32_t tableid, bool show_ng, struct route_show_ctx *ctx) { struct route_node *rn; struct route_entry *re; @@ -885,17 +822,12 @@ static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf, } if (ctx->multi && ctx->header_done) vty_out(vty, "\n"); - if (ctx->multi || zvrf_id(zvrf) != VRF_DEFAULT - || tableid) { - if (!tableid) - vty_out(vty, "VRF %s:\n", - 
zvrf_name(zvrf)); - else - vty_out(vty, - "VRF %s table %u:\n", - zvrf_name(zvrf), - tableid); - } + if (!tableid) + vty_out(vty, "%s %s VRF %s:\n", afi2str(afi), + safi2str(safi), zvrf_name(zvrf)); + else + vty_out(vty, "%s %s VRF %s table %u:\n", afi2str(afi), + safi2str(safi), zvrf_name(zvrf), tableid); ctx->header_done = true; first = 0; } @@ -917,12 +849,10 @@ static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf, vty_json_close(vty, first_json); } -static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf, - afi_t afi, bool use_fib, bool use_json, - route_tag_t tag, - const struct prefix *longer_prefix_p, - bool supernets_only, int type, - unsigned short ospf_instance_id, bool show_ng, +static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf, afi_t afi, safi_t safi, + bool use_fib, bool use_json, route_tag_t tag, + const struct prefix *longer_prefix_p, bool supernets_only, + int type, unsigned short ospf_instance_id, bool show_ng, struct route_show_ctx *ctx) { struct zebra_router_table *zrt; @@ -934,13 +864,11 @@ static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf, if (zvrf != info->zvrf) continue; - if (zrt->afi != afi || - zrt->safi != SAFI_UNICAST) + if (zrt->afi != afi || zrt->safi != safi) continue; - do_show_ip_route(vty, zvrf_name(zvrf), afi, SAFI_UNICAST, - use_fib, use_json, tag, longer_prefix_p, - supernets_only, type, ospf_instance_id, + do_show_ip_route(vty, zvrf_name(zvrf), afi, safi, use_fib, use_json, tag, + longer_prefix_p, supernets_only, type, ospf_instance_id, zrt->tableid, show_ng, ctx); } } @@ -973,7 +901,7 @@ static int do_show_ip_route(struct vty *vty, const char *vrf_name, afi_t afi, } if (tableid) - table = zebra_router_find_table(zvrf, tableid, afi, SAFI_UNICAST); + table = zebra_router_find_table(zvrf, tableid, afi, safi); else table = zebra_vrf_table(afi, safi, zvrf_id(zvrf)); if (!table) { @@ -982,9 +910,9 @@ static int do_show_ip_route(struct vty *vty, const 
char *vrf_name, afi_t afi, return CMD_SUCCESS; } - do_show_route_helper(vty, zvrf, table, afi, use_fib, tag, - longer_prefix_p, supernets_only, type, - ospf_instance_id, use_json, tableid, show_ng, ctx); + do_show_route_helper(vty, zvrf, table, afi, safi, use_fib, tag, longer_prefix_p, + supernets_only, type, ospf_instance_id, use_json, tableid, show_ng, + ctx); return CMD_SUCCESS; } @@ -1637,27 +1565,35 @@ DEFPY_HIDDEN(rnh_hide_backups, rnh_hide_backups_cmd, DEFPY (show_route, show_route_cmd, "show\ - <\ - ip$ipv4 [table <(1-4294967295)$table|all$table_all>]\ - [vrf ]\ - [{\ - tag (1-4294967295)\ - |A.B.C.D/M$prefix longer-prefixes\ - |supernets-only$supernets_only\ - }]\ - [<\ - " FRR_IP_REDIST_STR_ZEBRA "$type_str\ - |ospf$type_str (1-65535)$ospf_instance_id\ - >]\ - |ipv6$ipv6 [table <(1-4294967295)$table|all$table_all>]\ - [vrf ]\ - [{\ - tag (1-4294967295)\ - |X:X::X:X/M$prefix longer-prefixes\ - }]\ - [" FRR_IP6_REDIST_STR_ZEBRA "$type_str]\ - >\ - []", + <\ + ip$ipv4 \ + [{\ + table <(1-4294967295)$table|all$table_all>\ + |mrib$mrib\ + |vrf \ + }]\ + [{\ + tag (1-4294967295)\ + |A.B.C.D/M$prefix longer-prefixes\ + |supernets-only$supernets_only\ + }]\ + [<\ + " FRR_IP_REDIST_STR_ZEBRA "$type_str\ + |ospf$type_str (1-65535)$ospf_instance_id\ + >]\ + |ipv6$ipv6 \ + [{\ + table <(1-4294967295)$table|all$table_all>\ + |mrib$mrib\ + |vrf \ + }]\ + [{\ + tag (1-4294967295)\ + |X:X::X:X/M$prefix longer-prefixes\ + }]\ + [" FRR_IP6_REDIST_STR_ZEBRA "$type_str]\ + >\ + []", SHOW_STR IP_STR "IP forwarding table\n" @@ -1665,6 +1601,7 @@ DEFPY (show_route, "Table to display\n" "The table number to display\n" "All tables\n" + "Multicast SAFI table\n" VRF_FULL_CMD_HELP_STR "Show only routes with tag\n" "Tag value\n" @@ -1680,6 +1617,7 @@ DEFPY (show_route, "Table to display\n" "The table number to display\n" "All tables\n" + "Multicast SAFI table\n" VRF_FULL_CMD_HELP_STR "Show only routes with tag\n" "Tag value\n" @@ -1690,6 +1628,7 @@ DEFPY (show_route, "Nexthop Group 
Information\n") { afi_t afi = ipv4 ? AFI_IP : AFI_IP6; + safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST; bool first_vrf_json = true; struct vrf *vrf; int type = 0; @@ -1719,26 +1658,19 @@ DEFPY (show_route, if (vrf_all) { RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { - if ((zvrf = vrf->info) == NULL - || (zvrf->table[afi][SAFI_UNICAST] == NULL)) + if ((zvrf = vrf->info) == NULL || (zvrf->table[afi][safi] == NULL)) continue; if (json) vty_json_key(vty, zvrf_name(zvrf), &first_vrf_json); if (table_all) - do_show_ip_route_all(vty, zvrf, afi, !!fib, - !!json, tag, - prefix_str ? prefix : NULL, - !!supernets_only, type, - ospf_instance_id, !!ng, - &ctx); + do_show_ip_route_all(vty, zvrf, afi, safi, !!fib, !!json, tag, + prefix_str ? prefix : NULL, !!supernets_only, + type, ospf_instance_id, !!ng, &ctx); else - do_show_ip_route(vty, zvrf_name(zvrf), afi, - SAFI_UNICAST, !!fib, !!json, - tag, prefix_str ? prefix : NULL, - !!supernets_only, type, - ospf_instance_id, table, !!ng, - &ctx); + do_show_ip_route(vty, zvrf_name(zvrf), afi, safi, !!fib, !!json, + tag, prefix_str ? prefix : NULL, !!supernets_only, + type, ospf_instance_id, table, !!ng, &ctx); } if (json) vty_json_close(vty, first_vrf_json); @@ -1756,21 +1688,27 @@ DEFPY (show_route, return CMD_SUCCESS; if (table_all) - do_show_ip_route_all(vty, zvrf, afi, !!fib, !!json, tag, - prefix_str ? prefix : NULL, - !!supernets_only, type, + do_show_ip_route_all(vty, zvrf, afi, safi, !!fib, !!json, tag, + prefix_str ? prefix : NULL, !!supernets_only, type, ospf_instance_id, !!ng, &ctx); else - do_show_ip_route(vty, vrf->name, afi, SAFI_UNICAST, - !!fib, !!json, tag, - prefix_str ? prefix : NULL, - !!supernets_only, type, + do_show_ip_route(vty, vrf->name, afi, safi, !!fib, !!json, tag, + prefix_str ? 
prefix : NULL, !!supernets_only, type, ospf_instance_id, table, !!ng, &ctx); } return CMD_SUCCESS; } +ALIAS_DEPRECATED (show_route, + show_ip_rpf_cmd, + "show rpf$mrib [json$json]", + SHOW_STR + IP_STR + IPV6_STR + "Display RPF information for multicast source\n" + JSON_STR); + ALIAS_HIDDEN (show_route, show_ro_cmd, "show ro", @@ -1784,28 +1722,38 @@ DEFPY (show_route_detail, show_route_detail_cmd, "show\ <\ - ip$ipv4 [vrf ]\ - <\ - A.B.C.D$address\ - |A.B.C.D/M$prefix\ - >\ - |ipv6$ipv6 [vrf ]\ - <\ - X:X::X:X$address\ - |X:X::X:X/M$prefix\ - >\ - >\ - [json$json] [nexthop-group$ng]", + ip$ipv4 \ + [{\ + mrib$mrib\ + |vrf \ + }]\ + <\ + A.B.C.D$address\ + |A.B.C.D/M$prefix\ + >\ + |ipv6$ipv6 \ + [{\ + mrib$mrib\ + |vrf \ + }]\ + <\ + X:X::X:X$address\ + |X:X::X:X/M$prefix\ + >\ + >\ + [json$json] [nexthop-group$ng]", SHOW_STR IP_STR "IP forwarding table\n" "IP routing table\n" + "Multicast SAFI table\n" VRF_FULL_CMD_HELP_STR "Network in the IP routing table to display\n" "IP prefix /, e.g., 35.0.0.0/8\n" IP6_STR "IPv6 forwarding table\n" "IPv6 routing table\n" + "Multicast SAFI table\n" VRF_FULL_CMD_HELP_STR "IPv6 Address\n" "IPv6 prefix\n" @@ -1813,6 +1761,7 @@ DEFPY (show_route_detail, "Nexthop Group Information\n") { afi_t afi = ipv4 ? AFI_IP : AFI_IP6; + safi_t safi = mrib ? 
SAFI_MULTICAST : SAFI_UNICAST; struct route_table *table; struct prefix p; struct route_node *rn; @@ -1833,8 +1782,7 @@ DEFPY (show_route_detail, struct zebra_vrf *zvrf; RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { - if ((zvrf = vrf->info) == NULL - || (table = zvrf->table[afi][SAFI_UNICAST]) == NULL) + if ((zvrf = vrf->info) == NULL || (table = zvrf->table[afi][safi]) == NULL) continue; rn = route_node_match(table, &p); @@ -1855,7 +1803,7 @@ DEFPY (show_route_detail, if (json) vty_show_ip_route_detail_json(vty, rn, use_fib); else - vty_show_ip_route_detail(vty, rn, 0, use_fib, + vty_show_ip_route_detail(vty, rn, (safi == SAFI_MULTICAST), use_fib, show_ng); route_unlock_node(rn); @@ -1880,7 +1828,7 @@ DEFPY (show_route_detail, if (vrf_name) VRF_GET_ID(vrf_id, vrf_name, false); - table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id); + table = zebra_vrf_table(afi, safi, vrf_id); if (!table) return CMD_SUCCESS; @@ -1908,7 +1856,8 @@ DEFPY (show_route_detail, if (json) vty_show_ip_route_detail_json(vty, rn, use_fib); else - vty_show_ip_route_detail(vty, rn, 0, use_fib, show_ng); + vty_show_ip_route_detail(vty, rn, (safi == SAFI_MULTICAST), use_fib, + show_ng); route_unlock_node(rn); } @@ -1918,12 +1867,13 @@ DEFPY (show_route_detail, DEFPY (show_route_summary, show_route_summary_cmd, - "show route [vrf ] \ + "show route [{mrib$mrib|vrf }] \ summary [table (1-4294967295)$table_id] [prefix$prefix] [json]", SHOW_STR IP_STR IP6_STR "IP routing table\n" + "Multicast SAFI table\n" VRF_FULL_CMD_HELP_STR "Summary of all routes\n" "Table to display summary for\n" @@ -1932,6 +1882,7 @@ DEFPY (show_route_summary, JSON_STR) { afi_t afi = ipv4 ? AFI_IP : AFI_IP6; + safi_t safi = mrib ? 
SAFI_MULTICAST : SAFI_UNICAST; struct route_table *table; bool uj = use_json(argc, argv); json_object *vrf_json = NULL; @@ -1948,12 +1899,11 @@ DEFPY (show_route_summary, continue; if (table_id == 0) - table = zebra_vrf_table(afi, SAFI_UNICAST, - zvrf->vrf->vrf_id); + table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id); else - table = zebra_vrf_lookup_table_with_table_id( - afi, SAFI_UNICAST, zvrf->vrf->vrf_id, - table_id); + table = zebra_vrf_lookup_table_with_table_id(afi, safi, + zvrf->vrf->vrf_id, + table_id); if (!table) continue; @@ -1975,10 +1925,9 @@ DEFPY (show_route_summary, VRF_GET_ID(vrf_id, vrf_name, false); if (table_id == 0) - table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id); + table = zebra_vrf_table(afi, safi, vrf_id); else - table = zebra_vrf_lookup_table_with_table_id( - afi, SAFI_UNICAST, vrf_id, table_id); + table = zebra_vrf_lookup_table_with_table_id(afi, safi, vrf_id, table_id); if (!table) return CMD_SUCCESS; @@ -1991,50 +1940,49 @@ DEFPY (show_route_summary, return CMD_SUCCESS; } -DEFUN_HIDDEN (show_route_zebra_dump, +DEFPY_HIDDEN (show_route_zebra_dump, show_route_zebra_dump_cmd, - "show zebra route dump [vrf VRFNAME]", + "show zebra route dump [{mrib$mrib|vrf }]", SHOW_STR IP_STR IP6_STR "Zebra daemon\n" "Routing table\n" "All information\n" - VRF_CMD_HELP_STR) + "Multicast SAFI table\n" + VRF_FULL_CMD_HELP_STR) { - afi_t afi = AFI_IP; + afi_t afi = ipv4 ? AFI_IP : AFI_IP6; + safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST; struct route_table *table; - const char *vrf_name = NULL; - int idx = 0; - - afi = strmatch(argv[1]->text, "ipv6") ? 
AFI_IP6 : AFI_IP; - if (argv_find(argv, argc, "vrf", &idx)) - vrf_name = argv[++idx]->arg; - - if (!vrf_name) { + if (vrf_all) { struct vrf *vrf; struct zebra_vrf *zvrf; RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { zvrf = vrf->info; - if ((zvrf == NULL) - || (zvrf->table[afi][SAFI_UNICAST] == NULL)) + if (zvrf == NULL) continue; - table = zvrf->table[afi][SAFI_UNICAST]; - show_ip_route_dump_vty(vty, table); + table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id); + if (!table) + continue; + + show_ip_route_dump_vty(vty, table, afi, safi); } } else { vrf_id_t vrf_id = VRF_DEFAULT; - VRF_GET_ID(vrf_id, vrf_name, true); + if (vrf_name) + VRF_GET_ID(vrf_id, vrf_name, false); + + table = zebra_vrf_table(afi, safi, vrf_id); - table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id); if (!table) return CMD_SUCCESS; - show_ip_route_dump_vty(vty, table); + show_ip_route_dump_vty(vty, table, afi, safi); } return CMD_SUCCESS; @@ -2128,7 +2076,8 @@ static void show_ip_route_nht_dump(struct vty *vty, } } -static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table) +static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table, afi_t afi, + safi_t safi) { struct route_node *rn; struct route_entry *re; @@ -2140,7 +2089,7 @@ static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table) struct nexthop *nexthop = NULL; int nexthop_num = 0; - vty_out(vty, "\nIPv4/IPv6 Routing table dump\n"); + vty_out(vty, "\n%s %s Routing table dump\n", afi2str(afi), safi2str(safi)); vty_out(vty, "----------------------------\n"); for (rn = route_top(table); rn; rn = route_next(rn)) { @@ -4314,15 +4263,12 @@ void zebra_vty_init(void) install_element(VIEW_NODE, &show_vrf_cmd); install_element(VIEW_NODE, &show_vrf_vni_cmd); install_element(VIEW_NODE, &show_route_cmd); + install_element(VIEW_NODE, &show_ip_rpf_cmd); install_element(VIEW_NODE, &show_ro_cmd); install_element(VIEW_NODE, &show_route_detail_cmd); install_element(VIEW_NODE, 
&show_route_summary_cmd); install_element(VIEW_NODE, &show_ip_nht_cmd); - install_element(VIEW_NODE, &show_ip_rpf_cmd); - install_element(VIEW_NODE, &show_ip_rpf_addr_cmd); - install_element(VIEW_NODE, &show_ipv6_rpf_addr_cmd); - install_element(CONFIG_NODE, &rnh_hide_backups_cmd); install_element(VIEW_NODE, &show_frr_cmd); From 62baa3214d30661e6240f9569da05cd662bf3816 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Tue, 24 Sep 2024 22:10:41 +0000 Subject: [PATCH 04/11] test: Fix tests for zebra changes Remove use of `ip multicast rpf-lookup-mode` from unrelated tests. Looks like this test was just unlucky enough to pick that command as an example for use here. Just changed it to something less likely to be removed in the future. Update route table output to include AFI SAFI output. Signed-off-by: Nathan Bahr --- .../all_protocol_startup/r1/ipv4_routes.ref | 1 + .../all_protocol_startup/r1/ipv6_routes.ref | 1 + .../mgmt_config/r1/early-end-zebra.conf | 2 +- .../mgmt_config/r1/early-end2-zebra.conf | 2 +- .../mgmt_config/r1/early-exit-zebra.conf | 2 +- .../mgmt_config/r1/early-exit2-zebra.conf | 2 +- .../mgmt_config/r1/one-exit-zebra.conf | 2 +- .../mgmt_config/r1/one-exit2-zebra.conf | 2 +- tests/topotests/mgmt_config/test_config.py | 26 +++++-------------- .../r1/zebra-vrf-default.txt | 1 + .../r1/zebra-vrf-neno.txt | 2 +- .../r2/zebra-vrf-default.txt | 1 + .../r2/zebra-vrf-ray.txt | 2 +- .../r3/zebra-vrf-default.txt | 3 +-- .../r4/zebra-vrf-default.txt | 2 +- .../ospf_netns_vrf/r1/zebraroute.txt | 3 +-- .../ospf_netns_vrf/r1/zebraroutedown.txt | 3 +-- .../ospf_netns_vrf/r2/zebraroute.txt | 3 +-- .../ospf_netns_vrf/r2/zebraroutedown.txt | 3 +-- .../ospf_netns_vrf/r3/zebraroute.txt | 3 +-- .../ospf_netns_vrf/r3/zebraroutedown.txt | 3 +-- 21 files changed, 27 insertions(+), 42 deletions(-) diff --git a/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref b/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref index 33c44780b454..a188ad92fc4e 100644 --- 
a/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref +++ b/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref @@ -1,3 +1,4 @@ +IPv4 unicast VRF default: C>* 192.168.0.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 192.168.1.0/26 is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 192.168.2.0/26 is directly connected, r1-eth2, weight 1, XX:XX:XX diff --git a/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref b/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref index f5c1d6d7d232..4cb8692f90c7 100644 --- a/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref +++ b/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref @@ -1,3 +1,4 @@ +IPv6 unicast VRF default: C>* fc00:0:0:1::/64 is directly connected, r1-eth1, weight 1, XX:XX:XX C>* fc00:0:0:2::/64 is directly connected, r1-eth2, weight 1, XX:XX:XX C>* fc00:0:0:3::/64 is directly connected, r1-eth3, weight 1, XX:XX:XX diff --git a/tests/topotests/mgmt_config/r1/early-end-zebra.conf b/tests/topotests/mgmt_config/r1/early-end-zebra.conf index 44a2f968253e..926540f9bcc6 100644 --- a/tests/topotests/mgmt_config/r1/early-end-zebra.conf +++ b/tests/topotests/mgmt_config/r1/early-end-zebra.conf @@ -1,6 +1,6 @@ allow-external-route-update end -ip multicast rpf-lookup-mode urib-only +router-id 1.2.3.4 end ip table range 2 3 end \ No newline at end of file diff --git a/tests/topotests/mgmt_config/r1/early-end2-zebra.conf b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf index 37619d52ace1..b8514f324ff3 100644 --- a/tests/topotests/mgmt_config/r1/early-end2-zebra.conf +++ b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf @@ -1,7 +1,7 @@ conf t allow-external-route-update end -ip multicast rpf-lookup-mode urib-only +router-id 1.2.3.4 end ip table range 2 3 end \ No newline at end of file diff --git a/tests/topotests/mgmt_config/r1/early-exit-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf index 44f202dbcbb6..990351685b1f 100644 --- 
a/tests/topotests/mgmt_config/r1/early-exit-zebra.conf +++ b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf @@ -1,6 +1,6 @@ allow-external-route-update exit -ip multicast rpf-lookup-mode urib-only +router-id 1.2.3.4 exit ip table range 2 3 exit \ No newline at end of file diff --git a/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf index c7109bfd395d..5a783f449244 100644 --- a/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf +++ b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf @@ -1,7 +1,7 @@ conf t allow-external-route-update exit -ip multicast rpf-lookup-mode urib-only +router-id 1.2.3.4 exit ip table range 2 3 exit \ No newline at end of file diff --git a/tests/topotests/mgmt_config/r1/one-exit-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf index 0c38459702a6..c8396fec7039 100644 --- a/tests/topotests/mgmt_config/r1/one-exit-zebra.conf +++ b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf @@ -1,3 +1,3 @@ allow-external-route-update exit -ip multicast rpf-lookup-mode urib-only +router-id 1.2.3.4 diff --git a/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf index 34acb76d92d9..3a50f6d13687 100644 --- a/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf +++ b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf @@ -1,4 +1,4 @@ conf t allow-external-route-update exit -ip multicast rpf-lookup-mode urib-only \ No newline at end of file +router-id 1.2.3.4 \ No newline at end of file diff --git a/tests/topotests/mgmt_config/test_config.py b/tests/topotests/mgmt_config/test_config.py index 1d732223fff8..627a564a6607 100644 --- a/tests/topotests/mgmt_config/test_config.py +++ b/tests/topotests/mgmt_config/test_config.py @@ -153,7 +153,7 @@ def cleanup_config(r1, tempdir, logpath): yield r1.cmd_nostatus("vtysh -c 'conf t' -c 'no allow-external-route-update'") - r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip multicast 
rpf-lookup-mode urib-only'") + r1.cmd_nostatus("vtysh -c 'conf t' -c 'no router-id 1.2.3.4'") r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip table range 2 3'") logbuf = save_log_snippet(logpath, logbuf, "/dev/null") @@ -290,9 +290,7 @@ def test_zebra_one_exit_file(r1, confdir, tempdir, logpath): showrun = r1.cmd_nostatus("vtysh -c 'show running'") assert "allow-external-route-update" in showrun, "zebra conf missing" - assert ( - "ip multicast rpf-lookup-mode urib-only" not in showrun - ), "zebra second conf present, unexpected" + assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected" def test_zebra_one_exit_redir(r1, confdir, tempdir, logpath): @@ -307,9 +305,7 @@ def test_zebra_one_exit_redir(r1, confdir, tempdir, logpath): showrun = r1.cmd_nostatus("vtysh -c 'show running'") assert "allow-external-route-update" in showrun, "zebra conf missing" - assert ( - "ip multicast rpf-lookup-mode urib-only" not in showrun - ), "zebra second conf present, unexpected" + assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected" def test_zebra_early_exit_file(r1, confdir, tempdir, logpath): @@ -324,9 +320,7 @@ def test_zebra_early_exit_file(r1, confdir, tempdir, logpath): showrun = r1.cmd_nostatus("vtysh -c 'show running'") assert "allow-external-route-update" in showrun, "zebra conf missing" - assert ( - "ip multicast rpf-lookup-mode urib-only" not in showrun - ), "zebra second conf present, unexpected" + assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected" assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected" @@ -342,9 +336,7 @@ def test_zebra_early_exit_redir(r1, confdir, tempdir, logpath): showrun = r1.cmd_nostatus("vtysh -c 'show running'") assert "allow-external-route-update" in showrun, "zebra conf missing" - assert ( - "ip multicast rpf-lookup-mode urib-only" not in showrun - ), "zebra second conf present, unexpected" + assert "router-id 1.2.3.4" not in showrun, 
"zebra second conf present, unexpected" assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected" @@ -360,9 +352,7 @@ def test_zebra_early_end_file(r1, confdir, tempdir, logpath): showrun = r1.cmd_nostatus("vtysh -c 'show running'") assert "allow-external-route-update" in showrun, "zebra conf missing" - assert ( - "ip multicast rpf-lookup-mode urib-only" in showrun - ), "zebra second conf missing" + assert "router-id 1.2.3.4" in showrun, "zebra second conf missing" assert "ip table range 2 3" in showrun, "zebra third missing" @@ -378,7 +368,5 @@ def test_zebra_early_end_redir(r1, confdir, tempdir, logpath): showrun = r1.cmd_nostatus("vtysh -c 'show running'") assert "allow-external-route-update" in showrun, "zebra conf missing" - assert ( - "ip multicast rpf-lookup-mode urib-only" not in showrun - ), "zebra second conf present, unexpected" + assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected" assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected" diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt index 131085a47aa5..e4787be3c986 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt @@ -1,3 +1,4 @@ +IPv4 unicast VRF default: O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt index 45ee1071d406..2f893c3d96d5 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt @@ -1,4 +1,4 
@@ -VRF neno: +IPv4 unicast VRF neno: O>* 10.0.3.0/24 [110/20] via 10.0.30.3, r1-eth2, weight 1, XX:XX:XX B>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX O 10.0.30.0/24 [110/10] is directly connected, r1-eth2, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt index f3724bbb9ffe..07ec7226fa79 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt @@ -1,3 +1,4 @@ +IPv4 unicast VRF default: S>* 0.0.0.0/0 [1/0] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX O>* 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt index 0f8b12bdfab3..f409034b807c 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt @@ -1,4 +1,4 @@ -VRF ray: +IPv4 unicast VRF ray: B 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX B 10.0.2.0/24 [20/0] is directly connected, r2-eth0 (vrf default) inactive, weight 1, XX:XX:XX B>* 10.0.3.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt index db4e268cb018..2af9d2460d78 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt @@ -1,3 +1,4 @@ +IPv4 unicast VRF default: O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX C>* 10.0.3.0/24 is 
directly connected, r3-eth0, weight 1, XX:XX:XX L>* 10.0.3.3/32 is directly connected, r3-eth0, weight 1, XX:XX:XX @@ -6,5 +7,3 @@ O 10.0.30.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX C>* 10.0.30.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX L>* 10.0.30.3/32 is directly connected, r3-eth1, weight 1, XX:XX:XX O>* 10.0.40.0/24 [110/20] via 10.0.30.1, r3-eth1, weight 1, XX:XX:XX - - diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt index 4865708578e2..013073795b9d 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt @@ -1,3 +1,4 @@ +IPv4 unicast VRF default: O>* 10.0.3.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX O 10.0.4.0/24 [110/10] is directly connected, r4-eth0, weight 1, XX:XX:XX C>* 10.0.4.0/24 is directly connected, r4-eth0, weight 1, XX:XX:XX @@ -6,4 +7,3 @@ O>* 10.0.30.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX O 10.0.40.0/24 [110/10] is directly connected, r4-eth1, weight 1, XX:XX:XX C>* 10.0.40.0/24 is directly connected, r4-eth1, weight 1, XX:XX:XX L>* 10.0.40.4/32 is directly connected, r4-eth1, weight 1, XX:XX:XX - diff --git a/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt index 68fd30d4ccba..82cc2d9136f2 100644 --- a/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt +++ b/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt @@ -1,4 +1,4 @@ -VRF r1-ospf-cust1: +IPv4 unicast VRF r1-ospf-cust1: O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX @@ -7,4 +7,3 @@ O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r1-eth1, 
weight 1, XX:XX:XX L>* 10.0.3.2/32 is directly connected, r1-eth1, weight 1, XX:XX:XX O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, weight 1, XX:XX:XX - diff --git a/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt index f0bce905b132..d6ad2a250000 100644 --- a/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt +++ b/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt @@ -1,4 +1,4 @@ -VRF r1-ospf-cust1: +IPv4 unicast VRF r1-ospf-cust1: O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX @@ -6,4 +6,3 @@ O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r1-eth1, weight 1, XX:XX:XX L>* 10.0.3.2/32 is directly connected, r1-eth1, weight 1, XX:XX:XX - diff --git a/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt index 098eceb28bed..effcbc46345c 100644 --- a/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt +++ b/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt @@ -1,4 +1,4 @@ -VRF r2-ospf-cust1: +IPv4 unicast VRF r2-ospf-cust1: O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX C>* 10.0.2.0/24 is directly connected, r2-eth0, weight 1, XX:XX:XX @@ -7,4 +7,3 @@ O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r2-eth1, weight 1, XX:XX:XX L>* 10.0.3.3/32 is directly connected, r2-eth1, weight 1, XX:XX:XX O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, weight 1, XX:XX:XX - diff --git a/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt index a9300f8dfafb..7321b184a391 100644 
--- a/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt +++ b/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt @@ -1,4 +1,4 @@ -VRF r2-ospf-cust1: +IPv4 unicast VRF r2-ospf-cust1: O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX C>* 10.0.2.0/24 is directly connected, r2-eth0, weight 1, XX:XX:XX @@ -6,4 +6,3 @@ L>* 10.0.2.1/32 is directly connected, r2-eth0, weight 1, XX:XX:XX O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.3.0/24 is directly connected, r2-eth1, weight 1, XX:XX:XX L>* 10.0.3.3/32 is directly connected, r2-eth1, weight 1, XX:XX:XX - diff --git a/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt index f58beb81a729..3fea04bd1966 100644 --- a/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt +++ b/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt @@ -1,4 +1,4 @@ -VRF r3-ospf-cust1: +IPv4 unicast VRF r3-ospf-cust1: O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, weight 1, XX:XX:XX O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, weight 1, XX:XX:XX O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX @@ -7,4 +7,3 @@ L>* 10.0.3.1/32 is directly connected, r3-eth0, weight 1, XX:XX:XX O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX C>* 10.0.10.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX L>* 10.0.10.1/32 is directly connected, r3-eth1, weight 1, XX:XX:XX - diff --git a/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt index cfedf8fcb472..3287355ce0f1 100644 --- a/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt +++ b/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt @@ -1,5 +1,4 @@ -VRF r3-ospf-cust1: +IPv4 unicast VRF r3-ospf-cust1: O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX C>* 10.0.10.0/24 is directly connected, r3-eth1, 
weight 1, XX:XX:XX L>* 10.0.10.1/32 is directly connected, r3-eth1, weight 1, XX:XX:XX - From 9ed7d4bf93f885fce33d9cfc564c58ff126cbbfe Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Wed, 2 Oct 2024 19:03:48 +0000 Subject: [PATCH 05/11] pimd,yang: Reimplement RPF lookup vty in router pim Add rpf-lookup-mode MODE vty command under router pim block. Including NB piping and config write. Using the mode still pending. Signed-off-by: Nathan Bahr --- pimd/pim_cmd.c | 20 +++++++++++++ pimd/pim_instance.h | 1 + pimd/pim_nb.c | 7 +++++ pimd/pim_nb.h | 4 +++ pimd/pim_nb_config.c | 70 ++++++++++++++++++++++++++++++++++++++++++-- pimd/pim_rpf.h | 11 +++++++ pimd/pim_vty.c | 10 +++++++ yang/frr-pim.yang | 54 ++++++++++++++++++++++++++++++++-- 8 files changed, 172 insertions(+), 5 deletions(-) diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 934da2d53e67..4ac3eaba8ee5 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -8743,6 +8743,24 @@ DEFPY_ATTR(ip_pim_mlag, return ret; } +DEFPY_YANG(pim_rpf_lookup_mode, pim_rpf_lookup_mode_cmd, + "[no] rpf-lookup-mode ![urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix]$mode", + NO_STR + "RPF lookup behavior\n" + "Lookup in unicast RIB only\n" + "Lookup in multicast RIB only\n" + "Try multicast RIB first, fall back to unicast RIB\n" + "Lookup both, use entry with lower distance\n" + "Lookup both, use entry with longer prefix\n") +{ + if (no) + nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_DESTROY, NULL); + else + nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_MODIFY, mode); + + return nb_cli_apply_changes(vty, NULL); +} + struct cmd_node pim_node = { .name = "pim", .node = PIM_NODE, @@ -8903,6 +8921,8 @@ void pim_cmd_init(void) install_element(PIM_NODE, &pim_bsr_candidate_rp_group_cmd); install_element(PIM_NODE, &pim_bsr_candidate_bsr_cmd); + install_element(PIM_NODE, &pim_rpf_lookup_mode_cmd); + install_element(INTERFACE_NODE, &interface_ip_igmp_cmd); install_element(INTERFACE_NODE, 
&interface_no_ip_igmp_cmd); install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd); diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h index f484d847b233..694df5bc3160 100644 --- a/pimd/pim_instance.h +++ b/pimd/pim_instance.h @@ -116,6 +116,7 @@ struct pim_instance { char *register_plist; struct hash *rpf_hash; + enum pim_rpf_lookup_mode rpf_mode; void *ssm_info; /* per-vrf SSM configuration */ diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c index 66001d1463b9..9a8f682390ac 100644 --- a/pimd/pim_nb.c +++ b/pimd/pim_nb.c @@ -231,6 +231,13 @@ const struct frr_yang_module_info frr_pim_info = { .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy, } }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_destroy, + } + }, { .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family", .cbs = { diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h index befad4efe435..2ef6b817d3d4 100644 --- a/pimd/pim_nb.h +++ b/pimd/pim_nb.h @@ -95,6 +95,10 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re struct nb_cb_modify_args *args); int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy( struct nb_cb_destroy_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_destroy( + struct nb_cb_destroy_args *args); int lib_interface_pim_address_family_dr_priority_modify( struct nb_cb_modify_args *args); int lib_interface_pim_address_family_create(struct 
nb_cb_create_args *args); diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index ea8b56fee395..aba441318081 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -1672,6 +1672,73 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re return NB_OK; } +/* + * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup + */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + const char *mode; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + mode = yang_dnode_get_string(args->dnode, NULL); + + if (strmatch(mode, "none")) + pim->rpf_mode = MCAST_NO_CONFIG; + else if (strmatch(mode, "urib-only")) + pim->rpf_mode = MCAST_URIB_ONLY; + else if (strmatch(mode, "mrib-only")) + pim->rpf_mode = MCAST_MRIB_ONLY; + else if (strmatch(mode, "mrib-then-urib")) + pim->rpf_mode = MCAST_MIX_MRIB_FIRST; + else if (strmatch(mode, "lower-distance")) + pim->rpf_mode = MCAST_MIX_DISTANCE; + else if (strmatch(mode, "longer-prefix")) + pim->rpf_mode = MCAST_MIX_PFXLEN; + else { + snprintfrr(args->errmsg, args->errmsg_len, + "Invalid RPF lookup mode specified: %s", mode); + return CMD_WARNING_CONFIG_FAILED; + } + + /* TODO: Signal to redo lookups? 
*/ + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_destroy( + struct nb_cb_destroy_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + pim->rpf_mode = MCAST_NO_CONFIG; + /* TODO: Signal to redo lookups? */ + break; + } + + return NB_OK; +} + /* * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family */ @@ -2429,9 +2496,8 @@ int lib_interface_pim_address_family_mroute_oif_modify( #ifdef PIM_ENFORCE_LOOPFREE_MFC iif = nb_running_get_entry(args->dnode, NULL, false); - if (!iif) { + if (!iif) return NB_OK; - } pim_iifp = iif->info; pim = pim_iifp->pim; diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h index 7dae53f8fc81..b3eead631b16 100644 --- a/pimd/pim_rpf.h +++ b/pimd/pim_rpf.h @@ -41,6 +41,17 @@ struct pim_rpf { enum pim_rpf_result { PIM_RPF_OK = 0, PIM_RPF_CHANGED, PIM_RPF_FAILURE }; +/* RPF lookup behaviour */ +enum pim_rpf_lookup_mode { + MCAST_MRIB_ONLY = 0, /* MRIB only */ + MCAST_URIB_ONLY, /* URIB only */ + MCAST_MIX_MRIB_FIRST, /* MRIB, if nothing at all then URIB */ + MCAST_MIX_DISTANCE, /* MRIB & URIB, lower distance wins */ + MCAST_MIX_PFXLEN, /* MRIB & URIB, longer prefix wins */ + /* on equal value, MRIB wins for last 2 */ + MCAST_NO_CONFIG, /* MIX_MRIB_FIRST, but no show in config write */ +}; + struct pim_upstream; unsigned int pim_rpf_hash_key(const void *arg); diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c index b633e81d5536..de8493033e67 100644 --- a/pimd/pim_vty.c +++ b/pimd/pim_vty.c @@ -265,6 +265,16 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty) vty_out(vty, "\n"); } + if (pim->rpf_mode != MCAST_NO_CONFIG) { + ++writes; + vty_out(vty, " rpf-lookup-mode %s\n", + pim->rpf_mode == MCAST_URIB_ONLY ? 
"urib-only" + : pim->rpf_mode == MCAST_MRIB_ONLY ? "mrib-only" + : pim->rpf_mode == MCAST_MIX_MRIB_FIRST ? "mrib-then-urib" + : pim->rpf_mode == MCAST_MIX_DISTANCE ? "lower-distance" + : "longer-prefix"); + } + return writes; } diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang index 6a6c52185ddb..e28113a68611 100644 --- a/yang/frr-pim.yang +++ b/yang/frr-pim.yang @@ -78,6 +78,47 @@ module frr-pim { type string; } + /* + * Multicast RPF mode configurable type + */ + + typedef mcast-rpf-lookup-mode { + type enumeration { + enum "none" { + value 0; + description + "No mode set."; + } + enum "mrib-only" { + value 1; + description + "Lookup in unicast RIB only."; + } + enum "urib-only" { + value 2; + description + "Lookup in multicast RIB only."; + } + enum "mrib-then-urib" { + value 3; + description + "Try multicast RIB first, fall back to unicast RIB."; + } + enum "lower-distance" { + value 4; + description + "Lookup both unicast and mcast, use entry with lower distance."; + } + enum "longer-prefix" { + value 5; + description + "Lookup both unicast and mcast, use entry with longer prefix."; + } + } + description + "Multicast RPF lookup behavior"; + } + /* * Groupings */ @@ -157,20 +198,27 @@ module frr-pim { description "A grouping defining per address family pim global attributes"; + leaf mcast-rpf-lookup { + type mcast-rpf-lookup-mode; + default "none"; + description + "Multicast RPF lookup behavior."; + } + leaf ecmp { type boolean; default "false"; description "Enable PIM ECMP."; } - + leaf ecmp-rebalance { type boolean; default "false"; description "Enable PIM ECMP Rebalance."; } - + leaf keep-alive-timer { type uint16 { range "1..max"; @@ -179,7 +227,7 @@ module frr-pim { description "Keep alive Timer in seconds."; } - + leaf rp-keep-alive-timer { type uint16 { range "1..max"; From 0ad036f95831c9bb93d97fd472757a3a02876686 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Wed, 23 Oct 2024 18:48:57 +0000 Subject: [PATCH 06/11] pimd: Refactor synchronous nexthop 
lookup Add prefix length in nexthop response. Apply lookup mode to the sychronous lookups, where we may lookup the MRIB, URIB, or both and make a decision based on the nexthop. Signed-off-by: Nathan Bahr --- pimd/pim_zlookup.c | 96 +++++++++++++++++++++++++++++++++++++++++++--- pimd/pim_zlookup.h | 1 + 2 files changed, 91 insertions(+), 6 deletions(-) diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c index e2644c531bab..febc595ad4c4 100644 --- a/pimd/pim_zlookup.c +++ b/pimd/pim_zlookup.c @@ -153,6 +153,7 @@ static int zclient_read_nexthop(struct pim_instance *pim, struct ipaddr raddr; uint8_t distance; uint32_t metric; + uint16_t prefix_len; int nexthop_num; int i, err; @@ -193,8 +194,14 @@ static int zclient_read_nexthop(struct pim_instance *pim, distance = stream_getc(s); metric = stream_getl(s); + prefix_len = stream_getw(s); nexthop_num = stream_getw(s); + if (PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: addr=%pPAs(%s), distance=%d, metric=%d, prefix_len=%d, nexthop_num=%d", + __func__, &addr, pim->vrf->name, distance, metric, prefix_len, + nexthop_num); + if (nexthop_num < 1 || nexthop_num > router->multipath) { if (PIM_DEBUG_PIM_NHT_DETAIL) zlog_debug("%s: socket %d bad nexthop_num=%d", __func__, @@ -220,6 +227,7 @@ static int zclient_read_nexthop(struct pim_instance *pim, } nexthop_tab[num_ifindex].protocol_distance = distance; nexthop_tab[num_ifindex].route_metric = metric; + nexthop_tab[num_ifindex].prefix_len = prefix_len; nexthop_tab[num_ifindex].vrf_id = nexthop_vrf_id; switch (nexthop_type) { case NEXTHOP_TYPE_IFINDEX: @@ -301,20 +309,23 @@ static int zclient_read_nexthop(struct pim_instance *pim, } } + if (PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: addr=%pPAs(%s), num_ifindex=%d", __func__, &addr, pim->vrf->name, + num_ifindex); + return num_ifindex; } -static int zclient_lookup_nexthop_once(struct pim_instance *pim, - struct pim_zlookup_nexthop nexthop_tab[], - const int tab_size, pim_addr addr) +static int zclient_rib_lookup(struct pim_instance 
*pim, struct pim_zlookup_nexthop nexthop_tab[], + const int tab_size, pim_addr addr, safi_t safi) { struct stream *s; int ret; struct ipaddr ipaddr; if (PIM_DEBUG_PIM_NHT_DETAIL) - zlog_debug("%s: addr=%pPAs(%s)", __func__, &addr, - pim->vrf->name); + zlog_debug("%s: addr=%pPAs(%s), %sRIB", __func__, &addr, pim->vrf->name, + (safi == SAFI_MULTICAST ? "M" : "U")); /* Check socket. */ if (zlookup->sock < 0) { @@ -339,7 +350,7 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim, stream_reset(s); zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, pim->vrf->vrf_id); stream_put_ipaddr(s, &ipaddr); - stream_putc(s, SAFI_MULTICAST); // TODO NEB Set the real safi + stream_putc(s, safi); stream_putw_at(s, 0, stream_get_endp(s)); ret = writen(zlookup->sock, s->data, stream_get_endp(s)); @@ -362,6 +373,79 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim, return zclient_read_nexthop(pim, zlookup, nexthop_tab, tab_size, addr); } +static int zclient_lookup_nexthop_once(struct pim_instance *pim, + struct pim_zlookup_nexthop nexthop_tab[], const int tab_size, + pim_addr addr) +{ + if (pim->rpf_mode == MCAST_MRIB_ONLY) + return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_MULTICAST); + + if (pim->rpf_mode == MCAST_URIB_ONLY) + return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_UNICAST); + + /* All other modes require looking up both tables and making a choice */ + struct pim_zlookup_nexthop mrib_tab[tab_size]; + struct pim_zlookup_nexthop urib_tab[tab_size]; + int mrib_num; + int urib_num; + + memset(mrib_tab, 0, sizeof(struct pim_zlookup_nexthop) * tab_size); + memset(urib_tab, 0, sizeof(struct pim_zlookup_nexthop) * tab_size); + + if (PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: addr=%pPAs(%s), looking up both MRIB and URIB", __func__, &addr, + pim->vrf->name); + + mrib_num = zclient_rib_lookup(pim, mrib_tab, tab_size, addr, SAFI_MULTICAST); + urib_num = zclient_rib_lookup(pim, urib_tab, tab_size, addr, SAFI_UNICAST); + + if 
(PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: addr=%pPAs(%s), MRIB nexthops=%d, URIB nexthops=%d", __func__, + &addr, pim->vrf->name, mrib_num, urib_num); + + /* If only one table has results, use that always */ + if (mrib_num < 1) { + if (urib_num > 0) + memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size); + return urib_num; + } + + if (urib_num < 1) { + if (mrib_num > 0) + memcpy(nexthop_tab, mrib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size); + return mrib_num; + } + + /* See if we should use the URIB based on configured lookup mode */ + /* Both tables have results, so compare them. Distance and prefix length are the same for all + * nexthops, so only compare the first in the list + */ + if (pim->rpf_mode == MCAST_MIX_DISTANCE && + mrib_tab[0].protocol_distance > urib_tab[0].protocol_distance) { + if (PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: addr=%pPAs(%s), URIB has shortest distance", __func__, + &addr, pim->vrf->name); + memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size); + return urib_num; + } else if (pim->rpf_mode == MCAST_MIX_PFXLEN && + mrib_tab[0].prefix_len < urib_tab[0].prefix_len) { + if (PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: addr=%pPAs(%s), URIB has lengthest prefix length", __func__, + &addr, pim->vrf->name); + memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size); + return urib_num; + } + + /* All others use the MRIB */ + /* For MCAST_MIX_MRIB_FIRST (and by extension, MCAST_NO_CONFIG), + * always return mrib if both have results + */ + if (PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: addr=%pPAs(%s), MRIB has nexthops", __func__, &addr, pim->vrf->name); + memcpy(nexthop_tab, mrib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size); + return mrib_num; +} + void zclient_lookup_read_pipe(struct event *thread) { struct zclient *zlookup = EVENT_ARG(thread); diff --git a/pimd/pim_zlookup.h b/pimd/pim_zlookup.h index ee2dd20113a0..c9461eb7e3df 100644 --- 
a/pimd/pim_zlookup.h +++ b/pimd/pim_zlookup.h @@ -21,6 +21,7 @@ struct pim_zlookup_nexthop { ifindex_t ifindex; uint32_t route_metric; uint8_t protocol_distance; + uint16_t prefix_len; }; void zclient_lookup_new(void); From be6606ad1e46da0b000089422f625af7d7efefd2 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Wed, 23 Oct 2024 19:00:31 +0000 Subject: [PATCH 07/11] pimd: Refactor pim NHT Refactor the next hop tracking in PIM to fully support the configured RPF lookup mode. Moved many NHT related functions to pim_nht.h/c NHT now tracks both MRIB and URIB tables and makes nexthop decisions based on the configured lookup mode. Signed-off-by: Nathan Bahr --- pimd/pim_bsm.c | 2 +- pimd/pim_bsr_rpdb.c | 4 +- pimd/pim_cmd.c | 2 +- pimd/pim_cmd_common.c | 5 +- pimd/pim_iface.c | 29 +- pimd/pim_igmp_mtrace.c | 9 +- pimd/pim_instance.c | 12 +- pimd/pim_instance.h | 2 +- pimd/pim_mroute.c | 8 +- pimd/pim_msdp.c | 3 +- pimd/pim_nb_config.c | 2 +- pimd/pim_nht.c | 1314 ++++++++++++++++++++++++---------------- pimd/pim_nht.h | 101 ++- pimd/pim_rp.c | 84 +-- pimd/pim_rp.h | 2 - pimd/pim_rpf.c | 142 +---- pimd/pim_rpf.h | 8 +- pimd/pim_tib.c | 9 +- pimd/pim_upstream.c | 2 +- pimd/pim_vxlan.c | 7 +- 20 files changed, 927 insertions(+), 820 deletions(-) diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index a44e4e08f3d5..c6087bfff2f2 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -727,7 +727,7 @@ void pim_bsm_clear(struct pim_instance *pim) __func__, &nht_p); } - pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info); + pim_nht_delete_tracked(pim, nht_p, NULL, rp_info); if (!pim_get_all_mcast_group(&g_all)) return; diff --git a/pimd/pim_bsr_rpdb.c b/pimd/pim_bsr_rpdb.c index 6e93b65f4b99..02e7a69ff176 100644 --- a/pimd/pim_bsr_rpdb.c +++ b/pimd/pim_bsr_rpdb.c @@ -413,11 +413,11 @@ void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc) struct bsr_crp_rp *rp, ref; bool ok; - ref.addr = pnc->rpf.rpf_addr; + ref.addr = pnc->addr; rp = 
bsr_crp_rps_find(scope->ebsr_rps, &ref); assertf(rp, "addr=%pPA", &ref.addr); - ok = CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID); + ok = pim_nht_pnc_is_valid(pim, pnc); if (ok == rp->nht_ok) return; diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 4ac3eaba8ee5..397e19a7e016 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -3304,7 +3304,7 @@ DEFUN (show_ip_rib, return CMD_WARNING; } - if (!pim_nexthop_lookup(vrf->info, &nexthop, addr, 0)) { + if (!pim_nht_lookup(vrf->info, &nexthop, addr, 0)) { vty_out(vty, "Failure querying RIB nexthop for unicast address %s\n", addr_str); diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index 02ddea8252d6..fb2f05723969 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -2998,10 +2998,7 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty, pim_addr_to_prefix(&grp, group); memset(&nexthop, 0, sizeof(nexthop)); - result = - pim_ecmp_nexthop_lookup(v->info, &nexthop, vif_source, &grp, 0); - - if (!result) { + if (!pim_nht_lookup_ecmp(v->info, &nexthop, vif_source, &grp, false)) { vty_out(vty, "Nexthop Lookup failed, no usable routes returned.\n"); return CMD_SUCCESS; diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index 20e3ba184ba8..c1d784886e47 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -601,26 +601,13 @@ void pim_if_addr_add(struct connected *ifc) ifp->name); } } - struct pim_nexthop_cache *pnc = NULL; - struct pim_rpf rpf; - struct zclient *zclient = NULL; - - zclient = pim_zebra_zclient_get(); - /* RP config might come prior to (local RP's interface) - IF UP event. - In this case, pnc would not have pim enabled - nexthops. - Once Interface is UP and pim info is available, - reregister - with RNH address to receive update and add the - interface as nexthop. 
*/ - memset(&rpf, 0, sizeof(struct pim_rpf)); - rpf.rpf_addr = pim_addr_from_prefix(ifc->address); - pnc = pim_nexthop_cache_find(pim_ifp->pim, &rpf); - if (pnc) - pim_sendmsg_zebra_rnh(pim_ifp->pim, zclient, - pnc, - ZEBRA_NEXTHOP_REGISTER); + + /* RP config might come prior to local RP's interface IF UP event. + * In this case, pnc would not have pim enabled nexthops. Once + * Interface is UP and pim info is available, reregister with RNH + * address to receive update and add the interface as nexthop. + */ + pim_nht_get(pim_ifp->pim, pim_addr_from_prefix(ifc->address)); } } /* pim */ @@ -2036,7 +2023,7 @@ void pim_pim_interface_delete(struct interface *ifp) * pim_ifp->pim_neighbor_list. */ pim_sock_delete(ifp, "pim unconfigured on interface"); - pim_upstream_nh_if_update(pim_ifp->pim, ifp); + pim_nht_upstream_if_update(pim_ifp->pim, ifp); if (!pim_ifp->gm_enable) { pim_if_addr_del_all(ifp); diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c index 309da138d2b6..ad6f265101ba 100644 --- a/pimd/pim_igmp_mtrace.c +++ b/pimd/pim_igmp_mtrace.c @@ -16,6 +16,7 @@ #include "pim_oil.h" #include "pim_ifchannel.h" #include "pim_macro.h" +#include "pim_nht.h" #include "pim_igmp_mtrace.h" static struct in_addr mtrace_primary_address(struct interface *ifp) @@ -58,14 +59,14 @@ static bool mtrace_fwd_info_weak(struct pim_instance *pim, memset(&nexthop, 0, sizeof(nexthop)); - if (!pim_nexthop_lookup(pim, &nexthop, mtracep->src_addr, 1)) { + if (!pim_nht_lookup(pim, &nexthop, mtracep->src_addr, 1)) { if (PIM_DEBUG_MTRACE) zlog_debug("mtrace not found neighbor"); return false; } if (PIM_DEBUG_MTRACE) - zlog_debug("mtrace pim_nexthop_lookup OK"); + zlog_debug("mtrace pim_nht_lookup OK"); if (PIM_DEBUG_MTRACE) zlog_debug("mtrace next_hop=%pPAs", &nexthop.mrib_nexthop_addr); @@ -353,7 +354,7 @@ static int mtrace_un_forward_packet(struct pim_instance *pim, struct ip *ip_hdr, if (interface == NULL) { memset(&nexthop, 0, sizeof(nexthop)); - if (!pim_nexthop_lookup(pim, &nexthop, 
ip_hdr->ip_dst, 0)) { + if (!pim_nht_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) { if (PIM_DEBUG_MTRACE) zlog_debug( "Dropping mtrace packet, no route to destination"); @@ -535,7 +536,7 @@ static int mtrace_send_response(struct pim_instance *pim, } else { memset(&nexthop, 0, sizeof(nexthop)); /* TODO: should use unicast rib lookup */ - if (!pim_nexthop_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) { + if (!pim_nht_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) { if (PIM_DEBUG_MTRACE) zlog_debug( "Dropped response qid=%ud, no route to response address", diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c index f7c5ea3bcf34..281d541e97b9 100644 --- a/pimd/pim_instance.c +++ b/pimd/pim_instance.c @@ -15,6 +15,7 @@ #include "pim_ssm.h" #include "pim_rpf.h" #include "pim_rp.h" +#include "pim_nht.h" #include "pim_mroute.h" #include "pim_oil.h" #include "pim_static.h" @@ -44,8 +45,7 @@ static void pim_instance_terminate(struct pim_instance *pim) pim_bsm_proc_free(pim); - /* Traverse and cleanup rpf_hash */ - hash_clean_and_free(&pim->rpf_hash, (void *)pim_rp_list_hash_clean); + pim_nht_terminate(pim); pim_if_terminate(pim); @@ -71,7 +71,6 @@ static void pim_instance_terminate(struct pim_instance *pim) static struct pim_instance *pim_instance_init(struct vrf *vrf) { struct pim_instance *pim; - char hash_name[64]; pim = XCALLOC(MTYPE_PIM_PIM_INSTANCE, sizeof(struct pim_instance)); @@ -92,12 +91,7 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf) pim_msdp_init(pim, router->master); pim_vxlan_init(pim); - snprintf(hash_name, sizeof(hash_name), "PIM %s RPF Hash", vrf->name); - pim->rpf_hash = hash_create_size(256, pim_rpf_hash_key, pim_rpf_equal, - hash_name); - - if (PIM_DEBUG_ZEBRA) - zlog_debug("%s: NHT rpf hash init ", __func__); + pim_nht_init(pim); pim->ssm_info = pim_ssm_init(); diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h index 694df5bc3160..e9af72147d7b 100644 --- a/pimd/pim_instance.h +++ b/pimd/pim_instance.h @@ -115,7 +115,7 @@ struct 
pim_instance { /* The name of the register-accept prefix-list */ char *register_plist; - struct hash *rpf_hash; + struct hash *nht_hash; enum pim_rpf_lookup_mode rpf_mode; void *ssm_info; /* per-vrf SSM configuration */ diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c index adc47e719d24..c431665d2553 100644 --- a/pimd/pim_mroute.c +++ b/pimd/pim_mroute.c @@ -35,6 +35,7 @@ #include "pim_sock.h" #include "pim_vxlan.h" #include "pim_msg.h" +#include "pim_nht.h" static void mroute_read_on(struct pim_instance *pim); static int pim_upstream_mroute_update(struct channel_oil *c_oil, @@ -555,8 +556,7 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf, * setting the SPTBIT to true */ if (!(pim_addr_is_any(up->upstream_register)) && - pim_nexthop_lookup(pim_ifp->pim, &source, - up->upstream_register, 0)) { + pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0)) { pim_register_stop_send(source.interface, &sg, pim_ifp->primary_address, up->upstream_register); @@ -569,9 +569,7 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf, __func__); } else { if (I_am_RP(pim_ifp->pim, up->sg.grp)) { - if (pim_nexthop_lookup(pim_ifp->pim, &source, - up->upstream_register, - 0)) + if (pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0)) pim_register_stop_send( source.interface, &sg, pim_ifp->primary_address, diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 215cc3c5029c..7b6b971ea71c 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -26,6 +26,7 @@ #include "pim_time.h" #include "pim_upstream.h" #include "pim_oil.h" +#include "pim_nht.h" #include "pim_msdp.h" #include "pim_msdp_packet.h" @@ -705,7 +706,7 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp) } /* check if the MSDP peer is the nexthop for the RP */ - if (pim_nexthop_lookup(mp->pim, &nexthop, rp, 0) && + if (pim_nht_lookup(mp->pim, &nexthop, rp, 0) && nexthop.mrib_nexthop_addr.s_addr == mp->peer.s_addr) { return true; 
} diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index aba441318081..eeb352882e48 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -144,7 +144,7 @@ static int pim_cmd_interface_add(struct interface *ifp) pim_ifp->pim_enable = true; pim_if_addr_add_all(ifp); - pim_upstream_nh_if_update(pim_ifp->pim, ifp); + pim_nht_upstream_if_update(pim_ifp->pim, ifp); pim_if_membership_refresh(ifp); pim_if_create_pimreg(pim_ifp->pim); diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c index f2dbfa9765c8..334ad68da726 100644 --- a/pimd/pim_nht.c +++ b/pimd/pim_nht.c @@ -38,118 +38,267 @@ * pim_sendmsg_zebra_rnh -- Format and send a nexthop register/Unregister * command to Zebra. */ -void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient, - struct pim_nexthop_cache *pnc, int command) +static void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient, pim_addr addr, + int command) { struct prefix p; int ret; - pim_addr_to_prefix(&p, pnc->rpf.rpf_addr); - ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false, - pim->vrf->vrf_id); + pim_addr_to_prefix(&p, addr); + + /* Register to track nexthops from the MRIB */ + ret = zclient_send_rnh(zclient, command, &p, SAFI_MULTICAST, false, false, pim->vrf->vrf_id); + if (ret == ZCLIENT_SEND_FAILURE) + zlog_warn( + "sendmsg_nexthop: zclient_send_message() failed registering MRIB tracking"); + + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: MRIB NHT %sregistered addr %pFX(%s) with Zebra ret:%d ", __func__, + (command == ZEBRA_NEXTHOP_REGISTER) ? 
" " : "de", &p, pim->vrf->name, + ret); + + /* Also register to track nexthops from the URIB */ + ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false, pim->vrf->vrf_id); if (ret == ZCLIENT_SEND_FAILURE) - zlog_warn("sendmsg_nexthop: zclient_send_message() failed"); + zlog_warn( + "sendmsg_nexthop: zclient_send_message() failed registering URIB tracking"); if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: NHT %sregistered addr %pFX(%s) with Zebra ret:%d ", - __func__, - (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p, - pim->vrf->name, ret); + zlog_debug("%s: URIB NHT %sregistered addr %pFX(%s) with Zebra ret:%d ", __func__, + (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p, pim->vrf->name, + ret); return; } -struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim, - struct pim_rpf *rpf) +static struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim, pim_addr addr) { struct pim_nexthop_cache *pnc = NULL; struct pim_nexthop_cache lookup; - lookup.rpf.rpf_addr = rpf->rpf_addr; - pnc = hash_lookup(pim->rpf_hash, &lookup); + lookup.addr = addr; + pnc = hash_lookup(pim->nht_hash, &lookup); return pnc; } -static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim, - struct pim_rpf *rpf_addr) +static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim, pim_addr addr) { struct pim_nexthop_cache *pnc; char hash_name[64]; - pnc = XCALLOC(MTYPE_PIM_NEXTHOP_CACHE, - sizeof(struct pim_nexthop_cache)); - pnc->rpf.rpf_addr = rpf_addr->rpf_addr; + /* This function is only ever called if we are unable to find an entry, so + * the hash_get should always add a new entry + */ + pnc = XCALLOC(MTYPE_PIM_NEXTHOP_CACHE, sizeof(struct pim_nexthop_cache)); + pnc->addr = addr; - pnc = hash_get(pim->rpf_hash, pnc, hash_alloc_intern); + pnc = hash_get(pim->nht_hash, pnc, hash_alloc_intern); pnc->rp_list = list_new(); pnc->rp_list->cmp = pim_rp_list_cmp; - snprintfrr(hash_name, 
sizeof(hash_name), "PNC %pPA(%s) Upstream Hash", - &pnc->rpf.rpf_addr, pim->vrf->name); - pnc->upstream_hash = hash_create_size(8192, pim_upstream_hash_key, - pim_upstream_equal, hash_name); + snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash", &pnc->addr, + pim->vrf->name); + pnc->upstream_hash = hash_create_size(8192, pim_upstream_hash_key, pim_upstream_equal, + hash_name); return pnc; } -static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, - pim_addr addr) +static bool pim_nht_pnc_has_answer(struct pim_instance *pim, struct pim_nexthop_cache *pnc) +{ + switch (pim->rpf_mode) { + case MCAST_MRIB_ONLY: + return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_ANSWER_RECEIVED); + + case MCAST_URIB_ONLY: + return CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_ANSWER_RECEIVED); + + case MCAST_MIX_MRIB_FIRST: + case MCAST_NO_CONFIG: + case MCAST_MIX_DISTANCE: + case MCAST_MIX_PFXLEN: + /* This check is to determine if we've received an answer necessary to make a NH decision. + * For the mixed modes, where we may lookup from MRIB or URIB, let's require an answer + * for both tables. 
+ */ + return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_ANSWER_RECEIVED) && + CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_ANSWER_RECEIVED); + + default: + break; + } + return false; +} + +static struct pim_nexthop_cache_rib *pim_pnc_get_rib(struct pim_instance *pim, + struct pim_nexthop_cache *pnc) +{ + struct pim_nexthop_cache_rib *pnc_rib = NULL; + + if (pim->rpf_mode == MCAST_MRIB_ONLY) + pnc_rib = &pnc->mrib; + else if (pim->rpf_mode == MCAST_URIB_ONLY) + pnc_rib = &pnc->urib; + else if (pim->rpf_mode == MCAST_MIX_MRIB_FIRST || pim->rpf_mode == MCAST_NO_CONFIG) { + if (pnc->mrib.nexthop_num > 0) + pnc_rib = &pnc->mrib; + else + pnc_rib = &pnc->urib; + } else if (pim->rpf_mode == MCAST_MIX_DISTANCE) { + if (pnc->mrib.distance <= pnc->urib.distance) + pnc_rib = &pnc->mrib; + else + pnc_rib = &pnc->urib; + } else if (pim->rpf_mode == MCAST_MIX_PFXLEN) { + if (pnc->mrib.prefix_len >= pnc->urib.prefix_len) + pnc_rib = &pnc->mrib; + else + pnc_rib = &pnc->urib; + } + + return pnc_rib; +} + +bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc) +{ + switch (pim->rpf_mode) { + case MCAST_MRIB_ONLY: + return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_VALID); + + case MCAST_URIB_ONLY: + return CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_VALID); + + case MCAST_MIX_MRIB_FIRST: + case MCAST_NO_CONFIG: + case MCAST_MIX_DISTANCE: + case MCAST_MIX_PFXLEN: + /* The valid flag is set if there are nexthops...so when doing mixed, mrib might not have + * any nexthops, so consider valid if at least one RIB is valid + */ + return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_VALID) || + CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_VALID); + + default: + break; + } + return false; +} + +struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, pim_addr addr) { struct pim_nexthop_cache *pnc = NULL; - struct pim_rpf rpf; struct zclient *zclient = NULL; zclient = pim_zebra_zclient_get(); - memset(&rpf, 0, sizeof(rpf)); - rpf.rpf_addr = addr; + pnc = pim_nexthop_cache_find(pim, 
addr); - pnc = pim_nexthop_cache_find(pim, &rpf); - if (!pnc) { - pnc = pim_nexthop_cache_add(pim, &rpf); - pim_sendmsg_zebra_rnh(pim, zclient, pnc, - ZEBRA_NEXTHOP_REGISTER); - if (PIM_DEBUG_PIM_NHT_DETAIL) - zlog_debug( - "%s: NHT cache and zebra notification added for %pPA(%s)", - __func__, &addr, pim->vrf->name); - } + if (pnc) + return pnc; + + pnc = pim_nexthop_cache_add(pim, addr); + pim_sendmsg_zebra_rnh(pim, zclient, pnc->addr, ZEBRA_NEXTHOP_REGISTER); + + if (PIM_DEBUG_PIM_NHT_DETAIL) + zlog_debug("%s: NHT cache and zebra notification added for %pPA(%s)", __func__, + &addr, pim->vrf->name); return pnc; } -/* TBD: this does several distinct things and should probably be split up. - * (checking state vs. returning pnc vs. adding upstream vs. adding rp) +void pim_nht_set_gateway(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr addr, + struct interface *ifp) +{ + struct nexthop *nh_node = NULL; + struct interface *ifp1 = NULL; + + for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) { + /* If the gateway is already set, then keep it */ +#if PIM_IPV == 4 + if (!pim_addr_is_any(nh_node->gate.ipv4)) + continue; +#else + if (!pim_addr_is_any(nh_node->gate.ipv6)) + continue; +#endif + + /* Only set gateway on the correct interface */ + ifp1 = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id); + if (ifp != ifp1) + continue; + + /* Update the gateway address with the given address */ +#if PIM_IPV == 4 + nh_node->gate.ipv4 = addr; +#else + nh_node->gate.ipv6 = addr; +#endif + if (PIM_DEBUG_PIM_NHT_RP) + zlog_debug("%s: addr %pPA new MRIB nexthop addr %pPAs interface %s", + __func__, &pnc->addr, &addr, ifp1->name); + } + + /* Now do the same with URIB nexthop entries */ + for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) { +#if PIM_IPV == 4 + if (!pim_addr_is_any(nh_node->gate.ipv4)) + continue; +#else + if (!pim_addr_is_any(nh_node->gate.ipv6)) + continue; +#endif + + ifp1 = if_lookup_by_index(nh_node->ifindex, 
pim->vrf->vrf_id); + + if (ifp != ifp1) + continue; + +#if PIM_IPV == 4 + nh_node->gate.ipv4 = addr; +#else + nh_node->gate.ipv6 = addr; +#endif + if (PIM_DEBUG_PIM_NHT_RP) + zlog_debug("%s: addr %pPA new URIB nexthop addr %pPAs interface %s", + __func__, &pnc->addr, &addr, ifp1->name); + } +} + +/* Finds the nexthop cache entry for the given address. If no cache, add it for tracking. + * Up and/or rp may be given to add to the nexthop cache entry so that they get updates when the nexthop changes + * If out_pnc is not null, then copy the nexthop cache entry to it. + * Return true if an entry was found and is valid. */ -int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr, - struct pim_upstream *up, struct rp_info *rp, - struct pim_nexthop_cache *out_pnc) +bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up, + struct rp_info *rp, struct pim_nexthop_cache *out_pnc) { struct pim_nexthop_cache *pnc; struct listnode *ch_node = NULL; + /* This will find the entry and add it to tracking if not found */ pnc = pim_nht_get(pim, addr); assertf(up || rp, "addr=%pPA", &addr); + /* Store the RP if provided and not currently in the list */ if (rp != NULL) { ch_node = listnode_lookup(pnc->rp_list, rp); if (ch_node == NULL) listnode_add_sort(pnc->rp_list, rp); } + /* Store the upstream if provided and not currently in the list */ if (up != NULL) (void)hash_get(pnc->upstream_hash, up, hash_alloc_intern); - if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) { + if (pim_nht_pnc_is_valid(pim, pnc)) { if (out_pnc) memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache)); - return 1; + return true; } - return 0; + return false; } void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr) @@ -157,7 +306,6 @@ void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr) struct pim_nexthop_cache *pnc; pnc = pim_nht_get(pim, addr); - pnc->bsr_count++; } @@ -166,47 +314,47 @@ bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr) 
struct pim_nexthop_cache *pnc; pnc = pim_nht_get(pim, addr); - pnc->candrp_count++; - return CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID); + return pim_nht_pnc_is_valid(pim, pnc); } -static void pim_nht_drop_maybe(struct pim_instance *pim, - struct pim_nexthop_cache *pnc) +static void pim_nht_drop_maybe(struct pim_instance *pim, struct pim_nexthop_cache *pnc) { if (PIM_DEBUG_PIM_NHT) zlog_debug("%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u Cand-RP count:%u", - __func__, &pnc->rpf.rpf_addr, pim->vrf->name, - pnc->rp_list->count, pnc->upstream_hash->count, - pnc->bsr_count, pnc->candrp_count); + __func__, &pnc->addr, pim->vrf->name, pnc->rp_list->count, + pnc->upstream_hash->count, pnc->bsr_count, pnc->candrp_count); - if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0 && - pnc->bsr_count == 0 && pnc->candrp_count == 0) { + if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0 && pnc->bsr_count == 0 && + pnc->candrp_count == 0) { struct zclient *zclient = pim_zebra_zclient_get(); - pim_sendmsg_zebra_rnh(pim, zclient, pnc, - ZEBRA_NEXTHOP_UNREGISTER); + pim_sendmsg_zebra_rnh(pim, zclient, pnc->addr, ZEBRA_NEXTHOP_UNREGISTER); list_delete(&pnc->rp_list); + hash_free(pnc->upstream_hash); + hash_release(pim->nht_hash, pnc); + + if (pnc->urib.nexthop) + nexthops_free(pnc->urib.nexthop); + if (pnc->mrib.nexthop) + nexthops_free(pnc->mrib.nexthop); - hash_release(pim->rpf_hash, pnc); - if (pnc->nexthop) - nexthops_free(pnc->nexthop); XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc); } } -void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr, - struct pim_upstream *up, struct rp_info *rp) +void pim_nht_delete_tracked(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up, + struct rp_info *rp) { struct pim_nexthop_cache *pnc = NULL; struct pim_nexthop_cache lookup; struct pim_upstream *upstream = NULL; /* Remove from RPF hash if it is the last entry */ - lookup.rpf.rpf_addr = addr; - pnc = hash_lookup(pim->rpf_hash, 
&lookup); + lookup.addr = addr; + pnc = hash_lookup(pim->nht_hash, &lookup); if (!pnc) { zlog_warn("attempting to delete nonexistent NHT entry %pPA", &addr); @@ -251,9 +399,9 @@ void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr) if (pim_addr_is_any(addr)) return; - lookup.rpf.rpf_addr = addr; + lookup.addr = addr; - pnc = hash_lookup(pim->rpf_hash, &lookup); + pnc = hash_lookup(pim->nht_hash, &lookup); if (!pnc) { zlog_warn("attempting to delete nonexistent NHT BSR entry %pPA", @@ -272,9 +420,9 @@ void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr) struct pim_nexthop_cache *pnc = NULL; struct pim_nexthop_cache lookup; - lookup.rpf.rpf_addr = addr; + lookup.addr = addr; - pnc = hash_lookup(pim->rpf_hash, &lookup); + pnc = hash_lookup(pim->nht_hash, &lookup); if (!pnc) { zlog_warn("attempting to delete nonexistent NHT C-RP entry %pPA", @@ -297,10 +445,10 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, struct nexthop *nh; struct interface *ifp; - lookup.rpf.rpf_addr = bsr_addr; + lookup.addr = bsr_addr; - pnc = hash_lookup(pim->rpf_hash, &lookup); - if (!pnc || !CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) { + pnc = hash_lookup(pim->nht_hash, &lookup); + if (!pnc || !pim_nht_pnc_has_answer(pim, pnc)) { /* BSM from a new freshly registered BSR - do a synchronous * zebra query since otherwise we'd drop the first packet, * leading to additional delay in picking up BSM data @@ -359,91 +507,92 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, return false; } - if (!CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) - return false; - - /* if we accept BSMs from more than one ECMP nexthop, this will cause - * BSM message "multiplication" for each ECMP hop. i.e. if you have - * 4-way ECMP and 4 hops you end up with 256 copies of each BSM - * message. - * - * so... only accept the first (IPv4) valid nexthop as source. 
- */ + if (pim_nht_pnc_is_valid(pim, pnc)) { + /* if we accept BSMs from more than one ECMP nexthop, this will cause + * BSM message "multiplication" for each ECMP hop. i.e. if you have + * 4-way ECMP and 4 hops you end up with 256 copies of each BSM + * message. + * + * so... only accept the first (IPv4) valid nexthop as source. + */ + struct pim_nexthop_cache_rib *rib = pim_pnc_get_rib(pim, pnc); - for (nh = pnc->nexthop; nh; nh = nh->next) { - pim_addr nhaddr; + for (nh = rib->nexthop; nh; nh = nh->next) { + pim_addr nhaddr; - switch (nh->type) { + switch (nh->type) { #if PIM_IPV == 4 - case NEXTHOP_TYPE_IPV4: - if (nh->ifindex == IFINDEX_INTERNAL) - continue; + case NEXTHOP_TYPE_IPV4: + if (nh->ifindex == IFINDEX_INTERNAL) + continue; - fallthrough; - case NEXTHOP_TYPE_IPV4_IFINDEX: - nhaddr = nh->gate.ipv4; - break; - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV6_IFINDEX: - continue; -#else - case NEXTHOP_TYPE_IPV6: - if (nh->ifindex == IFINDEX_INTERNAL) + fallthrough; + case NEXTHOP_TYPE_IPV4_IFINDEX: + nhaddr = nh->gate.ipv4; + break; + + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: continue; +#else + case NEXTHOP_TYPE_IPV6: + if (nh->ifindex == IFINDEX_INTERNAL) + continue; - fallthrough; - case NEXTHOP_TYPE_IPV6_IFINDEX: - nhaddr = nh->gate.ipv6; - break; - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - continue; + fallthrough; + case NEXTHOP_TYPE_IPV6_IFINDEX: + nhaddr = nh->gate.ipv6; + break; + + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + continue; #endif - case NEXTHOP_TYPE_IFINDEX: - nhaddr = bsr_addr; - break; + case NEXTHOP_TYPE_IFINDEX: + nhaddr = bsr_addr; + break; - case NEXTHOP_TYPE_BLACKHOLE: - continue; - } + case NEXTHOP_TYPE_BLACKHOLE: + continue; + } - ifp = if_lookup_by_index(nh->ifindex, pim->vrf->vrf_id); - if (!ifp || !ifp->info) - continue; + ifp = if_lookup_by_index(nh->ifindex, pim->vrf->vrf_id); + if (!ifp || !ifp->info) + continue; - if (if_is_loopback(ifp) && if_is_loopback(src_ifp)) - 
return true; + if (if_is_loopback(ifp) && if_is_loopback(src_ifp)) + return true; - /* MRIB (IGP) may be pointing at a router where PIM is down */ - nbr = pim_neighbor_find(ifp, nhaddr, true); - if (!nbr) - continue; + /* MRIB (IGP) may be pointing at a router where PIM is down */ + nbr = pim_neighbor_find(ifp, nhaddr, true); + if (!nbr) + continue; - /* Are we on the correct interface? */ - if (nh->ifindex == src_ifp->ifindex) { - /* Do we have the correct NH ? */ - if (!pim_addr_cmp(nhaddr, src_ip)) - return true; - /* - * check If the packet came from the neighbor, - * and the dst is a secondary address on the connected interface - */ - return (!pim_addr_cmp(nbr->source_addr, src_ip) && - pim_if_connected_to_source(ifp, nhaddr)); + /* Are we on the correct interface? */ + if (nh->ifindex == src_ifp->ifindex) { + /* Do we have the correct NH ? */ + if (!pim_addr_cmp(nhaddr, src_ip)) + return true; + /* + * check If the packet came from the neighbor, + * and the dst is a secondary address on the connected interface + */ + return (!pim_addr_cmp(nbr->source_addr, src_ip) && + pim_if_connected_to_source(ifp, nhaddr)); + } + return false; } - return false; } return false; } -void pim_rp_nexthop_del(struct rp_info *rp_info) +void pim_nht_rp_del(struct rp_info *rp_info) { rp_info->rp.source_nexthop.interface = NULL; rp_info->rp.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY; rp_info->rp.source_nexthop.mrib_metric_preference = router->infinite_assert_metric.metric_preference; - rp_info->rp.source_nexthop.mrib_route_metric = - router->infinite_assert_metric.route_metric; + rp_info->rp.source_nexthop.mrib_route_metric = router->infinite_assert_metric.route_metric; } /* Update RP nexthop info based on Nexthop update received from Zebra.*/ @@ -461,10 +610,9 @@ static void pim_update_rp_nh(struct pim_instance *pim, ifp = rp_info->rp.source_nexthop.interface; // Compute PIM RPF using cached nexthop - if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, - 
rp_info->rp.rpf_addr, - &rp_info->group, 1)) - pim_rp_nexthop_del(rp_info); + if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr, + &rp_info->group, true)) + pim_nht_rp_del(rp_info); /* * If we transition from no path to a path @@ -544,33 +692,43 @@ static int pim_upstream_nh_if_update_helper(struct hash_bucket *bucket, struct pim_instance *pim = pwd->pim; struct interface *ifp = pwd->ifp; struct nexthop *nh_node = NULL; - ifindex_t first_ifindex; - for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) { - first_ifindex = nh_node->ifindex; - if (ifp != if_lookup_by_index(first_ifindex, pim->vrf->vrf_id)) - continue; + /* This update happens when an interface is added to/removed from pim. + * So go through both MRIB and URIB and update any upstreams for any + * matching nexthop + */ + for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) { + if (ifp->ifindex == nh_node->ifindex) { + if (pnc->upstream_hash->count) { + pim_update_upstream_nh(pim, pnc); + break; + } + } + } - if (pnc->upstream_hash->count) { - pim_update_upstream_nh(pim, pnc); - break; + for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) { + if (ifp->ifindex == nh_node->ifindex) { + if (pnc->upstream_hash->count) { + pim_update_upstream_nh(pim, pnc); + break; + } } } return HASHWALK_CONTINUE; } -void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp) +void pim_nht_upstream_if_update(struct pim_instance *pim, struct interface *ifp) { struct pnc_hash_walk_data pwd; pwd.pim = pim; pwd.ifp = ifp; - hash_walk(pim->rpf_hash, pim_upstream_nh_if_update_helper, &pwd); + hash_walk(pim->nht_hash, pim_upstream_nh_if_update_helper, &pwd); } -uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp) +static uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp) { uint32_t hash_val; @@ -583,47 +741,42 @@ uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp) return hash_val; } 
-static int pim_ecmp_nexthop_search(struct pim_instance *pim, - struct pim_nexthop_cache *pnc, - struct pim_nexthop *nexthop, pim_addr src, - struct prefix *grp, int neighbor_needed) +static bool pim_ecmp_nexthop_search(struct pim_instance *pim, struct pim_nexthop_cache *pnc, + struct pim_nexthop *nexthop, pim_addr src, struct prefix *grp, + bool neighbor_needed) { - struct pim_neighbor *nbrs[router->multipath], *nbr = NULL; - struct interface *ifps[router->multipath]; struct nexthop *nh_node = NULL; - ifindex_t first_ifindex; - struct interface *ifp = NULL; - uint32_t hash_val = 0, mod_val = 0; - uint16_t nh_iter = 0, found = 0; - uint32_t i, num_nbrs = 0; - struct pim_interface *pim_ifp; - - if (!pnc || !pnc->nexthop_num || !nexthop) - return 0; - - pim_addr nh_addr = nexthop->mrib_nexthop_addr; - pim_addr grp_addr = pim_addr_from_prefix(grp); + uint32_t hash_val = 0; + uint32_t mod_val = 0; + uint16_t nh_iter = 0; + bool found = false; + uint32_t num_nbrs = 0; + pim_addr nh_addr; + pim_addr grp_addr; + struct pim_nexthop_cache_rib *rib; - memset(&nbrs, 0, sizeof(nbrs)); - memset(&ifps, 0, sizeof(ifps)); + /* Early return if required parameters aren't provided */ + if (!pim || !pnc || !pim_nht_pnc_is_valid(pim, pnc) || !nexthop || !grp) + return false; + nh_addr = nexthop->mrib_nexthop_addr; + grp_addr = pim_addr_from_prefix(grp); + rib = pim_pnc_get_rib(pim, pnc); - // Current Nexthop is VALID, check to stay on the current path. + /* Current Nexthop is VALID, check to stay on the current path. */ if (nexthop->interface && nexthop->interface->info && (!pim_addr_is_any(nh_addr))) { - /* User configured knob to explicitly switch - to new path is disabled or current path - metric is less than nexthop update. + /* User configured knob to explicitly switch to new path is disabled or + * current path metric is less than nexthop update. 
*/ + if (!pim->ecmp_rebalance_enable) { + bool curr_route_valid = false; - if (pim->ecmp_rebalance_enable == 0) { - uint8_t curr_route_valid = 0; - // Check if current nexthop is present in new updated - // Nexthop list. - // If the current nexthop is not valid, candidate to - // choose new Nexthop. - for (nh_node = pnc->nexthop; nh_node; - nh_node = nh_node->next) { + /* Check if current nexthop is present in new updated Nexthop list. + * If the current nexthop is not valid, candidate to choose new + * Nexthop. + */ + for (nh_node = rib->nexthop; nh_node; nh_node = nh_node->next) { curr_route_valid = (nexthop->interface->ifindex == nh_node->ifindex); if (curr_route_valid) @@ -633,9 +786,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, if (curr_route_valid && !pim_if_connected_to_source(nexthop->interface, src)) { - nbr = pim_neighbor_find( - nexthop->interface, - nexthop->mrib_nexthop_addr, true); + struct pim_neighbor *nbr = + pim_neighbor_find(nexthop->interface, + nexthop->mrib_nexthop_addr, true); if (!nbr && !if_is_loopback(nexthop->interface)) { if (PIM_DEBUG_PIM_NHT) @@ -646,10 +799,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, /* update metric even if the upstream * neighbor stays unchanged */ - nexthop->mrib_metric_preference = - pnc->distance; - nexthop->mrib_route_metric = - pnc->metric; + nexthop->mrib_metric_preference = rib->distance; + nexthop->mrib_route_metric = rib->metric; if (PIM_DEBUG_PIM_NHT) zlog_debug( "%s: (%pPA,%pPA)(%s) current nexthop %s is valid, skipping new path selection", @@ -657,40 +808,39 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, &grp_addr, pim->vrf->name, nexthop->interface->name); - return 1; + return true; } } } } - /* - * Look up all interfaces and neighbors, - * store for later usage - */ - for (nh_node = pnc->nexthop, i = 0; nh_node; - nh_node = nh_node->next, i++) { - ifps[i] = - if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id); - if (ifps[i]) { + /* Count 
the number of neighbors for ECMP */ + for (nh_node = rib->nexthop; nh_node; nh_node = nh_node->next) { + struct pim_neighbor *nbr; + struct interface *ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id); + + if (!ifp) + continue; + #if PIM_IPV == 4 - pim_addr nhaddr = nh_node->gate.ipv4; + pim_addr nhaddr = nh_node->gate.ipv4; #else - pim_addr nhaddr = nh_node->gate.ipv6; + pim_addr nhaddr = nh_node->gate.ipv6; #endif - nbrs[i] = pim_neighbor_find(ifps[i], nhaddr, true); - if (nbrs[i] || pim_if_connected_to_source(ifps[i], src)) - num_nbrs++; - } + nbr = pim_neighbor_find(ifp, nhaddr, true); + if (nbr || pim_if_connected_to_source(ifp, src)) + num_nbrs++; } + if (pim->ecmp_enable) { struct prefix src_pfx; - uint32_t consider = pnc->nexthop_num; + uint32_t consider = rib->nexthop_num; if (neighbor_needed && num_nbrs < consider) consider = num_nbrs; if (consider == 0) - return 0; + return false; // PIM ECMP flag is enable then choose ECMP path. pim_addr_to_prefix(&src_pfx, src); @@ -698,16 +848,16 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, mod_val = hash_val % consider; } - for (nh_node = pnc->nexthop; nh_node && (found == 0); - nh_node = nh_node->next) { - first_ifindex = nh_node->ifindex; - ifp = ifps[nh_iter]; + for (nh_node = rib->nexthop; nh_node && !found; nh_node = nh_node->next) { + struct pim_neighbor *nbr = NULL; + struct pim_interface *pim_ifp; + struct interface *ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id); + if (!ifp) { if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s %s: could not find interface for ifindex %d (address %pPA(%s))", - __FILE__, __func__, first_ifindex, &src, - pim->vrf->name); + zlog_debug("%s %s: could not find interface for ifindex %d (address %pPA(%s))", + __FILE__, __func__, nh_node->ifindex, &src, + pim->vrf->name); if (nh_iter == mod_val) mod_val++; // Select nexthpath nh_iter++; @@ -718,10 +868,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, if (!pim_ifp || 
!pim_ifp->pim_enable) { if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)", - __func__, ifp->name, pim->vrf->name, - first_ifindex, &src); + zlog_debug("%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)", + __func__, ifp->name, pim->vrf->name, nh_node->ifindex, + &src); if (nh_iter == mod_val) mod_val++; // Select nexthpath nh_iter++; @@ -729,7 +878,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, } if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) { - nbr = nbrs[nh_iter]; +#if PIM_IPV == 4 + nbr = pim_neighbor_find(ifp, nh_node->gate.ipv4, true); +#else + nbr = pim_neighbor_find(ifp, nh_node->gate.ipv6, true); +#endif + if (!nbr && !if_is_loopback(ifp)) { if (PIM_DEBUG_PIM_NHT) zlog_debug( @@ -750,12 +904,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, #else nexthop->mrib_nexthop_addr = nh_node->gate.ipv6; #endif - nexthop->mrib_metric_preference = pnc->distance; - nexthop->mrib_route_metric = pnc->metric; + nexthop->mrib_metric_preference = rib->distance; + nexthop->mrib_route_metric = rib->metric; nexthop->last_lookup = src; nexthop->last_lookup_time = pim_time_monotonic_usec(); nexthop->nbr = nbr; - found = 1; + found = true; if (PIM_DEBUG_PIM_NHT) zlog_debug( "%s: (%pPA,%pPA)(%s) selected nhop interface %s addr %pPAs mod_val %u iter %d ecmp %d", @@ -766,260 +920,55 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, nh_iter++; } - if (found) - return 1; - else - return 0; + return found; } -/* This API is used to parse Registered address nexthop update coming from Zebra - */ -void pim_nexthop_update(struct vrf *vrf, struct prefix *match, - struct zapi_route *nhr) -{ - struct nexthop *nexthop; - struct nexthop *nhlist_head = NULL; - struct nexthop *nhlist_tail = NULL; - int i; - struct pim_rpf rpf; - struct pim_nexthop_cache *pnc = NULL; - struct interface *ifp = NULL; - struct pim_instance *pim; - - 
pim = vrf->info; - - rpf.rpf_addr = pim_addr_from_prefix(match); - pnc = pim_nexthop_cache_find(pim, &rpf); - if (!pnc) { - if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: Skipping NHT update, addr %pPA is not in local cached DB.", - __func__, &rpf.rpf_addr); - return; - } - - pnc->last_update = pim_time_monotonic_usec(); - - if (nhr->nexthop_num) { - pnc->nexthop_num = 0; - - for (i = 0; i < nhr->nexthop_num; i++) { - nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]); - switch (nexthop->type) { - case NEXTHOP_TYPE_IFINDEX: - /* - * Connected route (i.e. no nexthop), use - * RPF address from nexthop cache (i.e. - * destination) as PIM nexthop. - */ -#if PIM_IPV == 4 - nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX; - nexthop->gate.ipv4 = pnc->rpf.rpf_addr; -#else - nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX; - nexthop->gate.ipv6 = pnc->rpf.rpf_addr; -#endif - break; -#if PIM_IPV == 4 - /* RFC5549 IPv4-over-IPv6 nexthop handling: - * if we get an IPv6 nexthop in IPv4 PIM, hunt down a - * PIM neighbor and use that instead. - */ - case NEXTHOP_TYPE_IPV6_IFINDEX: { - struct interface *ifp1 = NULL; - struct pim_neighbor *nbr = NULL; - - ifp1 = if_lookup_by_index(nexthop->ifindex, - pim->vrf->vrf_id); - - if (!ifp1) - nbr = NULL; - else - /* FIXME: should really use nbr's - * secondary address list here - */ - nbr = pim_neighbor_find_if(ifp1); - - /* Overwrite with Nbr address as NH addr */ - if (nbr) - nexthop->gate.ipv4 = nbr->source_addr; - else - // Mark nexthop address to 0 until PIM - // Nbr is resolved. 
- nexthop->gate.ipv4 = PIMADDR_ANY; - - break; - } -#else - case NEXTHOP_TYPE_IPV6_IFINDEX: -#endif - case NEXTHOP_TYPE_IPV6: - case NEXTHOP_TYPE_IPV4: - case NEXTHOP_TYPE_IPV4_IFINDEX: - case NEXTHOP_TYPE_BLACKHOLE: - /* nothing to do for the other nexthop types */ - break; - } - - ifp = if_lookup_by_index(nexthop->ifindex, - pim->vrf->vrf_id); - if (!ifp) { - if (PIM_DEBUG_PIM_NHT) { - char buf[NEXTHOP_STRLEN]; - zlog_debug( - "%s: could not find interface for ifindex %d(%s) (addr %s)", - __func__, nexthop->ifindex, - pim->vrf->name, - nexthop2str(nexthop, buf, - sizeof(buf))); - } - nexthop_free(nexthop); - continue; - } - - if (PIM_DEBUG_PIM_NHT) { -#if PIM_IPV == 4 - pim_addr nhaddr = nexthop->gate.ipv4; -#else - pim_addr nhaddr = nexthop->gate.ipv6; -#endif - zlog_debug("%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ", - __func__, match, pim->vrf->name, - i + 1, &nhaddr, ifp->name, - nexthop->type, nhr->distance, - nhr->metric); - } - - if (!ifp->info) { - /* - * Though Multicast is not enabled on this - * Interface store it in database otheriwse we - * may miss this update and this will not cause - * any issue, because while choosing the path we - * are ommitting the Interfaces which are not - * multicast enabled - */ - if (PIM_DEBUG_PIM_NHT) { - char buf[NEXTHOP_STRLEN]; - - zlog_debug( - "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, addr %s)", - __func__, ifp->name, - pim->vrf->name, - nexthop->ifindex, - nexthop2str(nexthop, buf, - sizeof(buf))); - } - } - - if (nhlist_tail) { - nhlist_tail->next = nexthop; - nhlist_tail = nexthop; - } else { - nhlist_tail = nexthop; - nhlist_head = nexthop; - } - - // Keep track of all nexthops, even PIM-disabled ones. 
- pnc->nexthop_num++; - } - /* Reset existing pnc->nexthop before assigning new list */ - nexthops_free(pnc->nexthop); - pnc->nexthop = nhlist_head; - if (pnc->nexthop_num) { - pnc->flags |= PIM_NEXTHOP_VALID; - pnc->distance = nhr->distance; - pnc->metric = nhr->metric; - } - } else { - pnc->flags &= ~PIM_NEXTHOP_VALID; - pnc->nexthop_num = nhr->nexthop_num; - nexthops_free(pnc->nexthop); - pnc->nexthop = NULL; - } - SET_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED); - - if (PIM_DEBUG_PIM_NHT) - zlog_debug("%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d", - __func__, match, pim->vrf->name, nhr->nexthop_num, - pnc->nexthop_num, vrf->vrf_id, - pnc->upstream_hash->count, listcount(pnc->rp_list)); - - pim_rpf_set_refresh_time(pim); - - if (listcount(pnc->rp_list)) - pim_update_rp_nh(pim, pnc); - if (pnc->upstream_hash->count) - pim_update_upstream_nh(pim, pnc); - - if (pnc->candrp_count) - pim_crp_nht_update(pim, pnc); -} - -int pim_ecmp_nexthop_lookup(struct pim_instance *pim, - struct pim_nexthop *nexthop, pim_addr src, - struct prefix *grp, int neighbor_needed) +bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr src, + struct prefix *grp, bool neighbor_needed) { struct pim_nexthop_cache *pnc; struct pim_zlookup_nexthop nexthop_tab[router->multipath]; - struct pim_neighbor *nbrs[router->multipath], *nbr = NULL; - struct pim_rpf rpf; int num_ifindex; - struct interface *ifps[router->multipath], *ifp; - int first_ifindex; - int found = 0; + bool found = false; uint16_t i = 0; - uint32_t hash_val = 0, mod_val = 0; + uint32_t hash_val = 0; + uint32_t mod_val = 0; uint32_t num_nbrs = 0; - struct pim_interface *pim_ifp; if (PIM_DEBUG_PIM_NHT_DETAIL) - zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld", - __func__, &src, pim->vrf->name, - nexthop->last_lookup_time); - - rpf.rpf_addr = src; + zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld", __func__, &src, + pim->vrf->name, 
nexthop->last_lookup_time); - pnc = pim_nexthop_cache_find(pim, &rpf); + pnc = pim_nexthop_cache_find(pim, src); if (pnc) { - if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) - return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp, - neighbor_needed); + if (pim_nht_pnc_has_answer(pim, pnc)) + return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp, neighbor_needed); } - memset(nexthop_tab, 0, - sizeof(struct pim_zlookup_nexthop) * router->multipath); - num_ifindex = - zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src, - PIM_NEXTHOP_LOOKUP_MAX); + memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath); + num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src, + PIM_NEXTHOP_LOOKUP_MAX); if (num_ifindex < 1) { if (PIM_DEBUG_PIM_NHT) - zlog_warn( - "%s: could not find nexthop ifindex for address %pPA(%s)", - __func__, &src, pim->vrf->name); - return 0; + zlog_warn("%s: could not find nexthop ifindex for address %pPA(%s)", + __func__, &src, pim->vrf->name); + return false; } - memset(&nbrs, 0, sizeof(nbrs)); - memset(&ifps, 0, sizeof(ifps)); - - /* - * Look up all interfaces and neighbors, - * store for later usage - */ + /* Count the number of neighbors for ECMP computation */ for (i = 0; i < num_ifindex; i++) { - ifps[i] = if_lookup_by_index(nexthop_tab[i].ifindex, - pim->vrf->vrf_id); - if (ifps[i]) { - nbrs[i] = pim_neighbor_find( - ifps[i], nexthop_tab[i].nexthop_addr, true); - - if (nbrs[i] || pim_if_connected_to_source(ifps[i], src)) - num_nbrs++; - } + struct pim_neighbor *nbr; + struct interface *ifp = if_lookup_by_index(nexthop_tab[i].ifindex, pim->vrf->vrf_id); + + if (!ifp) + continue; + + nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true); + if (nbr || pim_if_connected_to_source(ifp, src)) + num_nbrs++; } - // If PIM ECMP enable then choose ECMP path. + /* If PIM ECMP enable then choose ECMP path. 
*/ if (pim->ecmp_enable) { struct prefix src_pfx; uint32_t consider = num_ifindex; @@ -1028,30 +977,27 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim, consider = num_nbrs; if (consider == 0) - return 0; + return false; pim_addr_to_prefix(&src_pfx, src); hash_val = pim_compute_ecmp_hash(&src_pfx, grp); mod_val = hash_val % consider; if (PIM_DEBUG_PIM_NHT_DETAIL) - zlog_debug("%s: hash_val %u mod_val %u", __func__, - hash_val, mod_val); + zlog_debug("%s: hash_val %u mod_val %u", __func__, hash_val, mod_val); } - i = 0; - while (!found && (i < num_ifindex)) { - first_ifindex = nexthop_tab[i].ifindex; + for (i = 0; i < num_ifindex && !found; i++) { + struct pim_neighbor *nbr = NULL; + struct pim_interface *pim_ifp; + struct interface *ifp = if_lookup_by_index(nexthop_tab[i].ifindex, pim->vrf->vrf_id); - ifp = ifps[i]; if (!ifp) { if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s %s: could not find interface for ifindex %d (address %pPA(%s))", - __FILE__, __func__, first_ifindex, &src, - pim->vrf->name); + zlog_debug("%s %s: could not find interface for ifindex %d (address %pPA(%s))", + __FILE__, __func__, nexthop_tab[i].ifindex, &src, + pim->vrf->name); if (i == mod_val) mod_val++; - i++; continue; } @@ -1059,99 +1005,431 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim, if (!pim_ifp || !pim_ifp->pim_enable) { if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)", - __func__, ifp->name, pim->vrf->name, - first_ifindex, &src); + zlog_debug("%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)", + __func__, ifp->name, pim->vrf->name, + nexthop_tab[i].ifindex, &src); if (i == mod_val) mod_val++; - i++; continue; } + if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) { - nbr = nbrs[i]; + nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true); if (PIM_DEBUG_PIM_NHT_DETAIL) - zlog_debug("ifp name: %s(%s), pim nbr: %p", - ifp->name, pim->vrf->name, nbr); 
+ zlog_debug("ifp name: %s(%s), pim nbr: %p", ifp->name, + pim->vrf->name, nbr); if (!nbr && !if_is_loopback(ifp)) { + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)", + __func__, &nexthop_tab[i].nexthop_addr, + ifp->name, pim->vrf->name, &src); if (i == mod_val) mod_val++; - if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)", - __func__, - &nexthop_tab[i].nexthop_addr, - ifp->name, pim->vrf->name, - &src); - i++; continue; } } if (i == mod_val) { if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d", - __func__, &nexthop_tab[i].nexthop_addr, - &src, ifp->name, pim->vrf->name, - nexthop_tab[i].route_metric, - nexthop_tab[i].protocol_distance); + zlog_debug("%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d", + __func__, &nexthop_tab[i].nexthop_addr, &src, ifp->name, + pim->vrf->name, nexthop_tab[i].route_metric, + nexthop_tab[i].protocol_distance); /* update nexthop data */ nexthop->interface = ifp; - nexthop->mrib_nexthop_addr = - nexthop_tab[i].nexthop_addr; - nexthop->mrib_metric_preference = - nexthop_tab[i].protocol_distance; - nexthop->mrib_route_metric = - nexthop_tab[i].route_metric; + nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr; + nexthop->mrib_metric_preference = nexthop_tab[i].protocol_distance; + nexthop->mrib_route_metric = nexthop_tab[i].route_metric; nexthop->last_lookup = src; nexthop->last_lookup_time = pim_time_monotonic_usec(); nexthop->nbr = nbr; - found = 1; + found = true; } - i++; } - if (found) - return 1; - else - return 0; + return found; } -int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src, - struct prefix *grp) +bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr, + int neighbor_needed) +{ + struct pim_zlookup_nexthop nexthop_tab[router->multipath]; + struct 
pim_neighbor *nbr = NULL; + int num_ifindex; + struct interface *ifp = NULL; + ifindex_t first_ifindex = 0; + bool found = false; + int i = 0; + struct pim_interface *pim_ifp; + +#if PIM_IPV == 4 + /* + * We should not attempt to lookup a + * 255.255.255.255 address, since + * it will never work + */ + if (pim_addr_is_any(addr)) + return false; +#endif + + if ((!pim_addr_cmp(nexthop->last_lookup, addr)) && + (nexthop->last_lookup_time > pim->last_route_change_time)) { + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: Using last lookup for %pPAs at %lld, %" PRId64 " addr %pPAs", + __func__, &addr, nexthop->last_lookup_time, + pim->last_route_change_time, &nexthop->mrib_nexthop_addr); + pim->nexthop_lookups_avoided++; + return true; + } + + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: Looking up: %pPAs, last lookup time: %lld, %" PRId64, __func__, + &addr, nexthop->last_lookup_time, pim->last_route_change_time); + + memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath); + num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, addr, + PIM_NEXTHOP_LOOKUP_MAX); + if (num_ifindex < 1) { + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: could not find nexthop ifindex for address %pPAs", __func__, + &addr); + return false; + } + + while (!found && (i < num_ifindex)) { + first_ifindex = nexthop_tab[i].ifindex; + + ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id); + if (!ifp) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: could not find interface for ifindex %d (address %pPAs)", + __func__, first_ifindex, &addr); + i++; + continue; + } + + pim_ifp = ifp->info; + if (!pim_ifp || !pim_ifp->pim_enable) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)", + __func__, ifp->name, first_ifindex, &addr); + i++; + } else if (neighbor_needed && !pim_if_connected_to_source(ifp, addr)) { + nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true); + if (PIM_DEBUG_PIM_TRACE_DETAIL) + 
zlog_debug("ifp name: %s, pim nbr: %p", ifp->name, nbr); + if (!nbr && !if_is_loopback(ifp)) + i++; + else + found = true; + } else + found = true; + } + + if (found) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d", + __func__, &nexthop_tab[i].nexthop_addr, &addr, ifp->name, + first_ifindex, nexthop_tab[i].route_metric, + nexthop_tab[i].protocol_distance); + + /* update nexthop data */ + nexthop->interface = ifp; + nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr; + nexthop->mrib_metric_preference = nexthop_tab[i].protocol_distance; + nexthop->mrib_route_metric = nexthop_tab[i].route_metric; + nexthop->last_lookup = addr; + nexthop->last_lookup_time = pim_time_monotonic_usec(); + nexthop->nbr = nbr; + return true; + } else + return false; +} + +int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, struct prefix *grp) { struct pim_nexthop nhop; int vif_index; ifindex_t ifindex; memset(&nhop, 0, sizeof(nhop)); - if (!pim_ecmp_nexthop_lookup(pim, &nhop, src, grp, 1)) { + if (!pim_nht_lookup_ecmp(pim, &nhop, src, grp, true)) { if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: could not find nexthop ifindex for address %pPA(%s)", - __func__, &src, pim->vrf->name); + zlog_debug("%s: could not find nexthop ifindex for address %pPA(%s)", + __func__, &src, pim->vrf->name); return -1; } ifindex = nhop.interface->ifindex; if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA", - __func__, ifindex, - ifindex2ifname(ifindex, pim->vrf->vrf_id), - pim->vrf->name, &src); + zlog_debug("%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA", + __func__, ifindex, ifindex2ifname(ifindex, pim->vrf->vrf_id), + pim->vrf->name, &src); vif_index = pim_if_find_vifindex_by_ifindex(pim, ifindex); if (vif_index < 0) { if (PIM_DEBUG_PIM_NHT) { - zlog_debug( - "%s: low vif_index=%d(%s) < 1 nexthop for address %pPA", - __func__, 
vif_index, pim->vrf->name, &src); + zlog_debug("%s: low vif_index=%d(%s) < 1 nexthop for address %pPA", + __func__, vif_index, pim->vrf->name, &src); } return -2; } return vif_index; } + +/* This API is used to parse Registered address nexthop update coming from Zebra + */ +void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr) +{ + struct nexthop *nhlist_head = NULL; + struct nexthop *nhlist_tail = NULL; + struct pim_nexthop_cache *pnc = NULL; + struct pim_nexthop_cache_rib *pnc_rib = NULL; + struct interface *ifp = NULL; + struct pim_instance *pim; + pim_addr addr; + + pim = vrf->info; + addr = pim_addr_from_prefix(match); + pnc = pim_nexthop_cache_find(pim, addr); + if (!pnc) { + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: Skipping NHT update, addr %pPA is not in local cached DB.", + __func__, &addr); + return; + } + + if (nhr->safi == SAFI_UNICAST) + pnc_rib = &pnc->urib; + else if (nhr->safi == SAFI_MULTICAST) + pnc_rib = &pnc->mrib; + else + return; + + pnc_rib->last_update = pim_time_monotonic_usec(); + SET_FLAG(pnc_rib->flags, PIM_NEXTHOP_ANSWER_RECEIVED); + UNSET_FLAG(pnc_rib->flags, PIM_NEXTHOP_VALID); + pnc_rib->nexthop_num = 0; + /* Free the existing nexthop list, resets with any valid nexthops from the update */ + nexthops_free(pnc_rib->nexthop); + pnc_rib->nexthop = NULL; + + for (int i = 0; i < nhr->nexthop_num; i++) { + struct nexthop *nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]); + + switch (nexthop->type) { + case NEXTHOP_TYPE_IFINDEX: + /* + * Connected route (i.e. no nexthop), use + * RPF address from nexthop cache (i.e. + * destination) as PIM nexthop. 
+ */ +#if PIM_IPV == 4 + nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX; + nexthop->gate.ipv4 = pnc->addr; +#else + nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX; + nexthop->gate.ipv6 = pnc->addr; +#endif + break; + +#if PIM_IPV == 4 + /* RFC5549 IPv4-over-IPv6 nexthop handling: + * if we get an IPv6 nexthop in IPv4 PIM, hunt down a + * PIM neighbor and use that instead. + */ + case NEXTHOP_TYPE_IPV6_IFINDEX: { + struct pim_neighbor *nbr = NULL; + struct interface *ifp1 = if_lookup_by_index(nexthop->ifindex, + pim->vrf->vrf_id); + + if (ifp1) + /* FIXME: should really use nbr's + * secondary address list here + */ + nbr = pim_neighbor_find_if(ifp1); + + /* Overwrite with Nbr address as NH addr */ + if (nbr) + nexthop->gate.ipv4 = nbr->source_addr; + else + /* Mark nexthop address to 0 until PIM Nbr is resolved. */ + nexthop->gate.ipv4 = PIMADDR_ANY; + + break; + } +#else + case NEXTHOP_TYPE_IPV6_IFINDEX: +#endif + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + case NEXTHOP_TYPE_BLACKHOLE: + /* nothing to do for the other nexthop types */ + break; + } + + ifp = if_lookup_by_index(nexthop->ifindex, pim->vrf->vrf_id); + if (!ifp) { + if (PIM_DEBUG_PIM_NHT) { + char buf[NEXTHOP_STRLEN]; + zlog_debug("%s: could not find interface for ifindex %d(%s) (addr %s)", + __func__, nexthop->ifindex, pim->vrf->name, + nexthop2str(nexthop, buf, sizeof(buf))); + } + nexthop_free(nexthop); + continue; + } + + if (PIM_DEBUG_PIM_NHT) { +#if PIM_IPV == 4 + pim_addr nhaddr = nexthop->gate.ipv4; +#else + pim_addr nhaddr = nexthop->gate.ipv6; +#endif + zlog_debug("%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ", + __func__, match, pim->vrf->name, i + 1, &nhaddr, ifp->name, + nexthop->type, nhr->distance, nhr->metric); + } + + if (!ifp->info) { + /* + * Though Multicast is not enabled on this + * Interface store it in database otheriwse we + * may miss this update and this will not cause + * any issue, because while choosing the path 
we + * are ommitting the Interfaces which are not + * multicast enabled + */ + if (PIM_DEBUG_PIM_NHT) { + char buf[NEXTHOP_STRLEN]; + + zlog_debug("%s: multicast not enabled on input interface %s(%s) (ifindex=%d, addr %s)", + __func__, ifp->name, pim->vrf->name, nexthop->ifindex, + nexthop2str(nexthop, buf, sizeof(buf))); + } + } + + if (nhlist_tail) { + nhlist_tail->next = nexthop; + nhlist_tail = nexthop; + } else { + nhlist_tail = nexthop; + nhlist_head = nexthop; + } + + /* Keep track of all nexthops, even PIM-disabled ones. */ + pnc_rib->nexthop_num++; + } /* End for nexthops */ + + /* Assign the list if there are nexthops */ + if (pnc_rib->nexthop_num) { + SET_FLAG(pnc_rib->flags, PIM_NEXTHOP_VALID); + pnc_rib->nexthop = nhlist_head; + pnc_rib->distance = nhr->distance; + pnc_rib->metric = nhr->metric; + pnc_rib->prefix_len = nhr->prefix.prefixlen; + } + + if (PIM_DEBUG_PIM_NHT) + zlog_debug("%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d", + __func__, match, pim->vrf->name, nhr->nexthop_num, pnc_rib->nexthop_num, + vrf->vrf_id, pnc->upstream_hash->count, listcount(pnc->rp_list)); + + pim_rpf_set_refresh_time(pim); + + if (listcount(pnc->rp_list)) + pim_update_rp_nh(pim, pnc); + if (pnc->upstream_hash->count) + pim_update_upstream_nh(pim, pnc); + + if (pnc->candrp_count) + pim_crp_nht_update(pim, pnc); +} + +static int pim_nht_hash_mode_update_helper(struct hash_bucket *bucket, void *arg) +{ + struct pim_nexthop_cache *pnc = bucket->data; + struct pnc_hash_walk_data *pwd = arg; + struct pim_instance *pim = pwd->pim; + + if (listcount(pnc->rp_list)) + pim_update_rp_nh(pim, pnc); + + if (pnc->upstream_hash->count) + pim_update_upstream_nh(pim, pnc); + + if (pnc->candrp_count) + pim_crp_nht_update(pim, pnc); + + return HASHWALK_CONTINUE; +} + +void pim_nht_mode_changed(struct pim_instance *pim) +{ + struct pnc_hash_walk_data pwd; + + /* Update the refresh time to force new lookups if needed */ + pim_rpf_set_refresh_time(pim); + + /* 
Force update the registered RP and upstreams for all cache entries */ + pwd.pim = pim; + hash_walk(pim->nht_hash, pim_nht_hash_mode_update_helper, &pwd); +} + +/* Cleanup pim->nht_hash each node data */ +static void pim_nht_hash_clean(void *data) +{ + struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data; + + list_delete(&pnc->rp_list); + hash_clean_and_free(&pnc->upstream_hash, NULL); + + if (pnc->mrib.nexthop) + nexthops_free(pnc->mrib.nexthop); + + if (pnc->urib.nexthop) + nexthops_free(pnc->urib.nexthop); + + XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc); +} + +static unsigned int pim_nht_hash_key(const void *arg) +{ + const struct pim_nexthop_cache *r = arg; + +#if PIM_IPV == 4 + return jhash_1word(r->addr.s_addr, 0); +#else + return jhash2(r->addr.s6_addr32, array_size(r->addr.s6_addr32), 0); +#endif +} + +static bool pim_nht_equal(const void *arg1, const void *arg2) +{ + const struct pim_nexthop_cache *r1 = arg1; + const struct pim_nexthop_cache *r2 = arg2; + + return (!pim_addr_cmp(r1->addr, r2->addr)); +} + +void pim_nht_init(struct pim_instance *pim) +{ + char hash_name[64]; + + snprintf(hash_name, sizeof(hash_name), "PIM %s NHT Hash", pim->vrf->name); + pim->nht_hash = hash_create_size(256, pim_nht_hash_key, pim_nht_equal, hash_name); + + pim->rpf_mode = MCAST_NO_CONFIG; + + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: NHT hash init: %s ", __func__, hash_name); +} + +void pim_nht_terminate(struct pim_instance *pim) +{ + /* Traverse and cleanup nht_hash */ + hash_clean_and_free(&pim->nht_hash, (void *)pim_nht_hash_clean); +} diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h index d064f714a528..0d185aad03d3 100644 --- a/pimd/pim_nht.h +++ b/pimd/pim_nht.h @@ -17,11 +17,12 @@ #include "pim_rpf.h" /* PIM nexthop cache value structure. */ -struct pim_nexthop_cache { - struct pim_rpf rpf; +struct pim_nexthop_cache_rib { /* IGP route's metric. */ uint32_t metric; uint32_t distance; + uint16_t prefix_len; + /* Nexthop number and nexthop linked list. 
*/ uint16_t nexthop_num; struct nexthop *nexthop; @@ -29,6 +30,13 @@ struct pim_nexthop_cache { uint16_t flags; #define PIM_NEXTHOP_VALID (1 << 0) #define PIM_NEXTHOP_ANSWER_RECEIVED (1 << 1) +}; + +struct pim_nexthop_cache { + pim_addr addr; + + struct pim_nexthop_cache_rib mrib; + struct pim_nexthop_cache_rib urib; struct list *rp_list; struct hash *upstream_hash; @@ -46,36 +54,71 @@ struct pnc_hash_walk_data { struct interface *ifp; }; -void pim_nexthop_update(struct vrf *vrf, struct prefix *match, - struct zapi_route *nhr); -int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr, - struct pim_upstream *up, struct rp_info *rp, - struct pim_nexthop_cache *out_pnc); -void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr, - struct pim_upstream *up, struct rp_info *rp); -struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim, - struct pim_rpf *rpf); -uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp); -int pim_ecmp_nexthop_lookup(struct pim_instance *pim, - struct pim_nexthop *nexthop, pim_addr src, - struct prefix *grp, int neighbor_needed); -void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient, - struct pim_nexthop_cache *pnc, int command); -int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src, - struct prefix *grp); -void pim_rp_nexthop_del(struct rp_info *rp_info); - -/* for RPF check on BSM message receipt */ +/* Verify that we have nexthop information in the cache entry */ +bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc); + +/* Get (or add) the NH cache entry for the given address */ +struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, pim_addr addr); + +/* Set the gateway address for all nexthops in the given cache entry to the given address + * unless the gateway is already set, and only if the nexthop is through the given interface. 
+ */ +void pim_nht_set_gateway(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr addr, + struct interface *ifp); + +/* Track a new addr, registers an upstream or RP for updates */ +bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up, + struct rp_info *rp, struct pim_nexthop_cache *out_pnc); + +/* Track a new addr, increments BSR count */ void pim_nht_bsr_add(struct pim_instance *pim, pim_addr bsr_addr); -void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr); -/* RPF(bsr_addr) == src_ip%src_ifp? */ -bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, - struct interface *src_ifp, pim_addr src_ip); -void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp); -/* wrappers for usage with Candidate RPs in BSMs */ +/* Track a new addr, increments Cand RP count */ bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr); + +/* Delete a tracked addr with registered upstream or RP, if no-one else is interested, stop tracking */ +void pim_nht_delete_tracked(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up, + struct rp_info *rp); + +/* Delete a tracked addr and decrement BSR count, if no-one else is interested, stop tracking */ +void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr); + +/* Delete a tracked addr and decrement Cand RP count, if no-one else is interested, stop tracking */ void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr); -void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc); + +/* RPF(bsr_addr) == src_ip%src_ifp? 
*/ +bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, struct interface *src_ifp, + pim_addr src_ip); + +/* Reset the rp.source_nexthop of the given RP */ +void pim_nht_rp_del(struct rp_info *rp_info); + +/* Walk the NH cache and update every nexthop that uses the given interface */ +void pim_nht_upstream_if_update(struct pim_instance *pim, struct interface *ifp); + +/* Lookup nexthop information for src, returned in nexthop when function returns true. + * Tries to find in cache first and does a synchronous lookup if not found in the cache. + * If neighbor_needed is true, then nexthop is only considered valid if it's to a pim + * neighbor. + * Providing the group only effects the ECMP decision, if enabled + */ +bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr src, + struct prefix *grp, bool neighbor_needed); + +/* Very similar to pim_nht_lookup_ecmp, but does not check the nht cache and only does + * a synchronous lookup. No ECMP decision is made. 
+ */ +bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr, + int neighbor_needed); + +/* Performs a pim_nht_lookup_ecmp and returns the mroute VIF index of the nexthop interface */ +int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, struct prefix *grp); + +/* Tracked nexthop update from zebra */ +void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr); + +/* NHT init and finish funcitons */ +void pim_nht_init(struct pim_instance *pim); +void pim_nht_terminate(struct pim_instance *pim); #endif diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c index 0c47bc15823d..d6758ccc1afd 100644 --- a/pimd/pim_rp.c +++ b/pimd/pim_rp.c @@ -40,20 +40,6 @@ #include "pim_ssm.h" #include "termtable.h" -/* Cleanup pim->rpf_hash each node data */ -void pim_rp_list_hash_clean(void *data) -{ - struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data; - - list_delete(&pnc->rp_list); - - hash_clean_and_free(&pnc->upstream_hash, NULL); - if (pnc->nexthop) - nexthops_free(pnc->nexthop); - - XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc); -} - static void pim_rp_info_free(struct rp_info *rp_info) { XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist); @@ -392,7 +378,7 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up) zlog_debug( "%s: Deregister upstream %s addr %pPA with Zebra NHT", __func__, up->sg_str, &old_upstream_addr); - pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL); + pim_nht_delete_tracked(pim, old_upstream_addr, up, NULL); } /* Update the upstream address */ @@ -547,12 +533,10 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group, zlog_debug("new RP %pPA for %pFX is ourselves", &rp_all->rp.rpf_addr, &rp_all->group); pim_rp_refresh_group_to_rp_mapping(pim); - pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all, - NULL); + pim_nht_find_or_track(pim, nht_p, NULL, rp_all, NULL); - if (!pim_ecmp_nexthop_lookup(pim, - &rp_all->rp.source_nexthop, 
- nht_p, &rp_all->group, 1)) + if (!pim_nht_lookup_ecmp(pim, &rp_all->rp.source_nexthop, nht_p, + &rp_all->group, true)) return PIM_RP_NO_PATH; return PIM_SUCCESS; } @@ -647,9 +631,8 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group, if (PIM_DEBUG_PIM_NHT_RP) zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ", __func__, &nht_p, &rp_info->group); - pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); - if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p, - &rp_info->group, 1)) + pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL); + if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group, true)) return PIM_RP_NO_PATH; return PIM_SUCCESS; @@ -740,7 +723,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group, if (PIM_DEBUG_PIM_NHT_RP) zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__, &nht_p); - pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info); + pim_nht_delete_tracked(pim, nht_p, NULL, rp_info); if (!pim_get_all_mcast_group(&g_all)) return PIM_RP_BAD_ADDRESS; @@ -874,10 +857,10 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr, if (PIM_DEBUG_PIM_NHT_RP) zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__, &nht_p); - pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info); + pim_nht_delete_tracked(pim, nht_p, NULL, rp_info); } - pim_rp_nexthop_del(rp_info); + pim_nht_rp_del(rp_info); listnode_delete(pim->rp_list, rp_info); /* Update the new RP address*/ @@ -911,9 +894,8 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr, zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ", __func__, &nht_p, &rp_info->group); - pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); - if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p, - &rp_info->group, 1)) { + pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL); + if (!pim_nht_lookup_ecmp(pim, 
&rp_info->rp.source_nexthop, nht_p, &rp_info->group, true)) { route_unlock_node(rn); return PIM_RP_NO_PATH; } @@ -939,13 +921,13 @@ void pim_rp_setup(struct pim_instance *pim) nht_p = rp_info->rp.rpf_addr; - pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); - if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, - nht_p, &rp_info->group, 1)) { + pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL); + if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group, + true)) { if (PIM_DEBUG_PIM_NHT_RP) zlog_debug( "Unable to lookup nexthop for rp specified"); - pim_rp_nexthop_del(rp_info); + pim_nht_rp_del(rp_info); } } } @@ -1084,10 +1066,9 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group) zlog_debug( "%s: NHT Register RP addr %pPA grp %pFX with Zebra", __func__, &nht_p, &rp_info->group); - pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL); + pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL); pim_rpf_set_refresh_time(pim); - (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, - nht_p, &rp_info->group, 1); + pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group, true); return (&rp_info->rp); } @@ -1288,7 +1269,6 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr) { struct listnode *node = NULL; struct rp_info *rp_info = NULL; - struct nexthop *nh_node = NULL; pim_addr nht_p; struct pim_nexthop_cache pnc; @@ -1298,34 +1278,10 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr) nht_p = rp_info->rp.rpf_addr; memset(&pnc, 0, sizeof(struct pim_nexthop_cache)); - if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc)) - continue; - - for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) { -#if PIM_IPV == 4 - if (!pim_addr_is_any(nh_node->gate.ipv4)) - continue; -#else - if (!pim_addr_is_any(nh_node->gate.ipv6)) - continue; -#endif - - struct interface *ifp1 = if_lookup_by_index( - nh_node->ifindex, 
pim->vrf->vrf_id); - if (nbr->interface != ifp1) - continue; + if (!pim_nht_find_or_track(pim, nht_p, NULL, rp_info, &pnc)) + continue; -#if PIM_IPV == 4 - nh_node->gate.ipv4 = nbr->source_addr; -#else - nh_node->gate.ipv6 = nbr->source_addr; -#endif - if (PIM_DEBUG_PIM_NHT_RP) - zlog_debug( - "%s: addr %pPA new nexthop addr %pPAs interface %s", - __func__, &nht_p, &nbr->source_addr, - ifp1->name); - } + pim_nht_set_gateway(pim, &pnc, nbr->source_addr, nbr->interface); } } diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h index 24832d0dbd60..96a034eae00d 100644 --- a/pimd/pim_rp.h +++ b/pimd/pim_rp.h @@ -29,8 +29,6 @@ struct rp_info { void pim_rp_init(struct pim_instance *pim); void pim_rp_free(struct pim_instance *pim); -void pim_rp_list_hash_clean(void *data); - int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group, const char *plist, enum rp_source rp_src_flag); void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr, diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c index d18ec4943a94..75e921382549 100644 --- a/pimd/pim_rpf.c +++ b/pimd/pim_rpf.c @@ -38,120 +38,6 @@ void pim_rpf_set_refresh_time(struct pim_instance *pim) pim->last_route_change_time); } -bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, - pim_addr addr, int neighbor_needed) -{ - struct pim_zlookup_nexthop nexthop_tab[router->multipath]; - struct pim_neighbor *nbr = NULL; - int num_ifindex; - struct interface *ifp = NULL; - ifindex_t first_ifindex = 0; - int found = 0; - int i = 0; - struct pim_interface *pim_ifp; - -#if PIM_IPV == 4 - /* - * We should not attempt to lookup a - * 255.255.255.255 address, since - * it will never work - */ - if (pim_addr_is_any(addr)) - return false; -#endif - - if ((!pim_addr_cmp(nexthop->last_lookup, addr)) && - (nexthop->last_lookup_time > pim->last_route_change_time)) { - if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: Using last lookup for %pPAs at %lld, %" PRId64 - " addr %pPAs", - __func__, &addr, 
nexthop->last_lookup_time, - pim->last_route_change_time, - &nexthop->mrib_nexthop_addr); - pim->nexthop_lookups_avoided++; - return true; - } else { - if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s: Looking up: %pPAs, last lookup time: %lld, %" PRId64, - __func__, &addr, nexthop->last_lookup_time, - pim->last_route_change_time); - } - - memset(nexthop_tab, 0, - sizeof(struct pim_zlookup_nexthop) * router->multipath); - num_ifindex = - zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, - addr, PIM_NEXTHOP_LOOKUP_MAX); - if (num_ifindex < 1) { - if (PIM_DEBUG_PIM_NHT) - zlog_debug( - "%s %s: could not find nexthop ifindex for address %pPAs", - __FILE__, __func__, &addr); - return false; - } - - while (!found && (i < num_ifindex)) { - first_ifindex = nexthop_tab[i].ifindex; - - ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id); - if (!ifp) { - if (PIM_DEBUG_ZEBRA) - zlog_debug( - "%s %s: could not find interface for ifindex %d (address %pPAs)", - __FILE__, __func__, first_ifindex, - &addr); - i++; - continue; - } - - pim_ifp = ifp->info; - if (!pim_ifp || !pim_ifp->pim_enable) { - if (PIM_DEBUG_ZEBRA) - zlog_debug( - "%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)", - __func__, ifp->name, first_ifindex, - &addr); - i++; - } else if (neighbor_needed && - !pim_if_connected_to_source(ifp, addr)) { - nbr = pim_neighbor_find( - ifp, nexthop_tab[i].nexthop_addr, true); - if (PIM_DEBUG_PIM_TRACE_DETAIL) - zlog_debug("ifp name: %s, pim nbr: %p", - ifp->name, nbr); - if (!nbr && !if_is_loopback(ifp)) - i++; - else - found = 1; - } else - found = 1; - } - - if (found) { - if (PIM_DEBUG_ZEBRA) - zlog_debug( - "%s %s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d", - __FILE__, __func__, - &nexthop_tab[i].nexthop_addr, &addr, ifp->name, - first_ifindex, nexthop_tab[i].route_metric, - nexthop_tab[i].protocol_distance); - - /* update nexthop data */ - nexthop->interface = ifp; - nexthop->mrib_nexthop_addr 
= nexthop_tab[i].nexthop_addr; - nexthop->mrib_metric_preference = - nexthop_tab[i].protocol_distance; - nexthop->mrib_route_metric = nexthop_tab[i].route_metric; - nexthop->last_lookup = addr; - nexthop->last_lookup_time = pim_time_monotonic_usec(); - nexthop->nbr = nbr; - return true; - } else - return false; -} - static int nexthop_mismatch(const struct pim_nexthop *nh1, const struct pim_nexthop *nh2) { @@ -221,9 +107,9 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim, if ((pim_addr_is_any(up->sg.src) && I_am_RP(pim, up->sg.grp)) || PIM_UPSTREAM_FLAG_TEST_FHR(up->flags)) neigh_needed = false; - pim_find_or_track_nexthop(pim, up->upstream_addr, up, NULL, NULL); - if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, src, &grp, - neigh_needed)) { + + pim_nht_find_or_track(pim, up->upstream_addr, up, NULL, NULL); + if (!pim_nht_lookup_ecmp(pim, &rpf->source_nexthop, src, &grp, neigh_needed)) { /* Route is Deleted in Zebra, reset the stored NH data */ pim_upstream_rpf_clear(pim, up); pim_rpf_cost_change(pim, up, saved_mrib_route_metric); @@ -371,25 +257,3 @@ int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2) return 0; } - -unsigned int pim_rpf_hash_key(const void *arg) -{ - const struct pim_nexthop_cache *r = arg; - -#if PIM_IPV == 4 - return jhash_1word(r->rpf.rpf_addr.s_addr, 0); -#else - return jhash2(r->rpf.rpf_addr.s6_addr32, - array_size(r->rpf.rpf_addr.s6_addr32), 0); -#endif -} - -bool pim_rpf_equal(const void *arg1, const void *arg2) -{ - const struct pim_nexthop_cache *r1 = - (const struct pim_nexthop_cache *)arg1; - const struct pim_nexthop_cache *r2 = - (const struct pim_nexthop_cache *)arg2; - - return (!pim_addr_cmp(r1->rpf.rpf_addr, r2->rpf.rpf_addr)); -} diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h index b3eead631b16..68a8bf0ff706 100644 --- a/pimd/pim_rpf.h +++ b/pimd/pim_rpf.h @@ -11,6 +11,7 @@ #include "pim_str.h" struct pim_instance; +struct pim_upstream; /* RFC 4601: @@ -52,13 +53,6 @@ enum pim_rpf_lookup_mode { 
MCAST_NO_CONFIG, /* MIX_MRIB_FIRST, but no show in config write */ }; -struct pim_upstream; - -unsigned int pim_rpf_hash_key(const void *arg); -bool pim_rpf_equal(const void *arg1, const void *arg2); - -bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, - pim_addr addr, int neighbor_needed); enum pim_rpf_result pim_rpf_update(struct pim_instance *pim, struct pim_upstream *up, struct pim_rpf *old, const char *caller); diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c index ac07154f86c1..aa48de65690c 100644 --- a/pimd/pim_tib.c +++ b/pimd/pim_tib.c @@ -34,16 +34,13 @@ tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif) up = pim_upstream_find(pim, &sg); if (up) { - memcpy(&nexthop, &up->rpf.source_nexthop, - sizeof(struct pim_nexthop)); - (void)pim_ecmp_nexthop_lookup(pim, &nexthop, vif_source, &grp, - 0); + memcpy(&nexthop, &up->rpf.source_nexthop, sizeof(struct pim_nexthop)); + pim_nht_lookup_ecmp(pim, &nexthop, vif_source, &grp, false); if (nexthop.interface) input_iface_vif_index = pim_if_find_vifindex_by_ifindex( pim, nexthop.interface->ifindex); } else - input_iface_vif_index = - pim_ecmp_fib_lookup_if_vif_index(pim, vif_source, &grp); + input_iface_vif_index = pim_nht_lookup_ecmp_if_vif_index(pim, vif_source, &grp); if (PIM_DEBUG_ZEBRA) zlog_debug("%s: NHT %pSG vif_source %pPAs vif_index:%d", diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c index 7417f311377f..857cc21979d8 100644 --- a/pimd/pim_upstream.c +++ b/pimd/pim_upstream.c @@ -257,7 +257,7 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim, zlog_debug( "%s: Deregister upstream %s addr %pPA with Zebra NHT", __func__, up->sg_str, &up->upstream_addr); - pim_delete_tracked_nexthop(pim, up->upstream_addr, up, NULL); + pim_nht_delete_tracked(pim, up->upstream_addr, up, NULL); } XFREE(MTYPE_PIM_UPSTREAM, up); diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c index f1f315cc9810..511d35bf76ef 100644 --- a/pimd/pim_vxlan.c +++ 
b/pimd/pim_vxlan.c @@ -411,10 +411,9 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg) * we must dereg the old nexthop and force to new "static" * iif */ - if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) { - pim_delete_tracked_nexthop(vxlan_sg->pim, - up->upstream_addr, up, NULL); - } + if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) + pim_nht_delete_tracked(vxlan_sg->pim, up->upstream_addr, up, NULL); + /* We are acting FHR; clear out use_rpt setting if any */ pim_upstream_update_use_rpt(up, false /*update_mroute*/); pim_upstream_ref(up, flags, __func__); From 93e437c75931f8ffa550d587fdb2f3dcdba73551 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Fri, 25 Oct 2024 19:10:17 +0000 Subject: [PATCH 08/11] pimd: Update nexthops when lookup mode changes Link up the RPF lookup mode changing to a force update to RP's and upstreams registered for nexthop lookup cache updates. Signed-off-by: Nathan Bahr --- pimd/pim_nb_config.c | 20 +++++++++++++++----- pimd/pim_nht.h | 3 +++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index eeb352882e48..377b9fb62c9c 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -1681,6 +1681,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mc struct vrf *vrf; struct pim_instance *pim; const char *mode; + enum pim_rpf_lookup_mode old_mode; switch (args->event) { case NB_EV_VALIDATE: @@ -1692,9 +1693,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mc pim = vrf->info; mode = yang_dnode_get_string(args->dnode, NULL); - if (strmatch(mode, "none")) - pim->rpf_mode = MCAST_NO_CONFIG; - else if (strmatch(mode, "urib-only")) + old_mode = pim->rpf_mode; + if (strmatch(mode, "urib-only")) pim->rpf_mode = MCAST_URIB_ONLY; else if (strmatch(mode, "mrib-only")) pim->rpf_mode = MCAST_MRIB_ONLY; @@ -1710,7 +1710,12 @@ int 
routing_control_plane_protocols_control_plane_protocol_pim_address_family_mc return CMD_WARNING_CONFIG_FAILED; } - /* TODO: Signal to redo lookups? */ + if (pim->rpf_mode != old_mode && + /* MCAST_MIX_MRIB_FIRST is the default if not configured */ + (old_mode != MCAST_NO_CONFIG && pim->rpf_mode != MCAST_MIX_MRIB_FIRST)) { + pim_nht_mode_changed(pim); + } + break; } @@ -1722,6 +1727,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mc { struct vrf *vrf; struct pim_instance *pim; + enum pim_rpf_lookup_mode old_mode; switch (args->event) { case NB_EV_VALIDATE: @@ -1731,8 +1737,12 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mc case NB_EV_APPLY: vrf = nb_running_get_entry(args->dnode, NULL, true); pim = vrf->info; + old_mode = pim->rpf_mode; pim->rpf_mode = MCAST_NO_CONFIG; - /* TODO: Signal to redo lookups? */ + /* MCAST_MIX_MRIB_FIRST is the default if not configured */ + if (old_mode != MCAST_NO_CONFIG && old_mode != MCAST_MIX_MRIB_FIRST) { + pim_nht_mode_changed(pim); + } break; } diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h index 0d185aad03d3..144139f406fa 100644 --- a/pimd/pim_nht.h +++ b/pimd/pim_nht.h @@ -117,6 +117,9 @@ int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, str /* Tracked nexthop update from zebra */ void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr); +/* RPF lookup mode changed via configuration */ +void pim_nht_mode_changed(struct pim_instance *pim); + /* NHT init and finish funcitons */ void pim_nht_init(struct pim_instance *pim); void pim_nht_terminate(struct pim_instance *pim); From 25b686f12f4be90bbd0673db2d1bf37ff639a9de Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Wed, 23 Oct 2024 19:00:57 +0000 Subject: [PATCH 09/11] pimd: Clean up pim RPF/NHT show commands Moved `show ip rpf A.B.C.D` command here from zebra, deprecated and aliased to `show ip pim nexthop-lookup`. 
Allow group to be optional in the lookup command. Only validate group if source is ANY. Documented setting source via RP if not provided. Added new output if ANY source + group lookup is performed and no RP is found for the group. Updated output to include souce and group for lookup. Signed-off-by: Nathan Bahr --- pimd/pim_cmd.c | 11 +++- pimd/pim_cmd_common.c | 147 +++++++++++++++++++++++------------------- 2 files changed, 89 insertions(+), 69 deletions(-) diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 397e19a7e016..8d13c7680255 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -2930,7 +2930,7 @@ DEFPY (show_ip_pim_nexthop, DEFPY (show_ip_pim_nexthop_lookup, show_ip_pim_nexthop_lookup_cmd, - "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source A.B.C.D$group", + "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source [A.B.C.D$group]", SHOW_STR IP_STR PIM_STR @@ -2942,6 +2942,14 @@ DEFPY (show_ip_pim_nexthop_lookup, return pim_show_nexthop_lookup_cmd_helper(vrf, vty, source, group); } +ALIAS_DEPRECATED (show_ip_pim_nexthop_lookup, + show_ip_rpf_source_cmd, + "show ip rpf A.B.C.D$source", + SHOW_STR + IP_STR + "Display RPF information for multicast source\n" + "Nexthop lookup for specific source address\n"); + DEFPY (show_ip_pim_interface_traffic, show_ip_pim_interface_traffic_cmd, "show ip pim [vrf NAME] interface traffic [WORD$if_name] [json$json]", @@ -9046,6 +9054,7 @@ void pim_cmd_init(void) install_element(VIEW_NODE, &show_ip_ssmpingd_cmd); install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd); install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd); + install_element(VIEW_NODE, &show_ip_rpf_source_cmd); install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd); install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd); install_element(VIEW_NODE, &show_ip_pim_bsr_rpinfo_cmd); diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index fb2f05723969..f032a55a9f91 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -2876,31 +2876,39 @@ static int 
pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg) struct vty *vty = cwd->vty; struct pim_instance *pim = cwd->pim; struct nexthop *nh_node = NULL; - ifindex_t first_ifindex; struct interface *ifp = NULL; struct ttable *tt = NULL; char *table = NULL; /* Prepare table. */ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); - ttable_add_row(tt, "Address|Interface|Nexthop"); + ttable_add_row(tt, "Address|Interface|Nexthop|Table"); tt->style.cell.rpad = 2; tt->style.corner = '+'; ttable_restyle(tt); - for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) { - first_ifindex = nh_node->ifindex; - - ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id); + for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) { + ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id); +#if PIM_IPV == 4 + ttable_add_row(tt, "%pPA|%s|%pI4|%s", &pnc->addr, ifp ? ifp->name : "NULL", + &nh_node->gate.ipv4, "MRIB"); +#else + ttable_add_row(tt, "%pPA|%s|%pI6|%s", &pnc->addr, ifp ? ifp->name : "NULL", + &nh_node->gate.ipv6, "MRIB"); +#endif + } + for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) { + ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id); #if PIM_IPV == 4 - ttable_add_row(tt, "%pPA|%s|%pI4", &pnc->rpf.rpf_addr, - ifp ? ifp->name : "NULL", &nh_node->gate.ipv4); + ttable_add_row(tt, "%pPA|%s|%pI4|%s", &pnc->addr, ifp ? ifp->name : "NULL", + &nh_node->gate.ipv4, "URIB"); #else - ttable_add_row(tt, "%pPA|%s|%pI6", &pnc->rpf.rpf_addr, - ifp ? ifp->name : "NULL", &nh_node->gate.ipv6); + ttable_add_row(tt, "%pPA|%s|%pI6|%s", &pnc->addr, ifp ? ifp->name : "NULL", + &nh_node->gate.ipv6, "URIB"); #endif } + /* Dump the generated table. 
*/ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); @@ -2910,56 +2918,58 @@ static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg) return CMD_SUCCESS; } -static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet, - void *arg) +static void pim_print_json_nexthop(json_object *json_obj, struct nexthop *nh_node, + struct interface *ifp, char *addr_str, const char *type) { - struct pim_nexthop_cache *pnc = backet->data; - struct json_pnc_cache_walk_data *cwd = arg; - struct pim_instance *pim = cwd->pim; - struct nexthop *nh_node = NULL; - ifindex_t first_ifindex; - struct interface *ifp = NULL; - char addr_str[PIM_ADDRSTRLEN]; json_object *json_row = NULL; json_object *json_ifp = NULL; json_object *json_arr = NULL; struct pim_interface *pim_ifp = NULL; - bool pim_enable = false; - - for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) { - first_ifindex = nh_node->ifindex; - ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id); - snprintfrr(addr_str, sizeof(addr_str), "%pPA", - &pnc->rpf.rpf_addr); - json_object_object_get_ex(cwd->json_obj, addr_str, &json_row); - if (!json_row) { - json_row = json_object_new_object(); - json_object_string_addf(json_row, "address", "%pPA", - &pnc->rpf.rpf_addr); - json_object_object_addf(cwd->json_obj, json_row, "%pPA", - &pnc->rpf.rpf_addr); - json_arr = json_object_new_array(); - json_object_object_add(json_row, "nexthops", json_arr); - } - json_ifp = json_object_new_object(); - json_object_string_add(json_ifp, "interface", - ifp ? 
ifp->name : "NULL"); - if (ifp) - pim_ifp = ifp->info; + if (ifp) + pim_ifp = ifp->info; - if (pim_ifp && pim_ifp->pim_enable) - pim_enable = true; + json_object_object_get_ex(json_obj, addr_str, &json_row); - json_object_boolean_add(json_ifp, "pimEnabled", pim_enable); + if (!json_row) { + json_row = json_object_new_object(); + json_object_string_addf(json_row, "address", "%s", addr_str); + json_object_object_addf(json_obj, json_row, "%s", addr_str); + json_arr = json_object_new_array(); + json_object_object_add(json_row, "nexthops", json_arr); + } + + json_ifp = json_object_new_object(); + json_object_string_add(json_ifp, "interface", ifp ? ifp->name : "NULL"); + json_object_boolean_add(json_ifp, "pimEnabled", (pim_ifp && pim_ifp->pim_enable)); #if PIM_IPV == 4 - json_object_string_addf(json_ifp, "nexthop", "%pI4", - &nh_node->gate.ipv4); + json_object_string_addf(json_ifp, "nexthop", "%pI4", &nh_node->gate.ipv4); #else - json_object_string_addf(json_ifp, "nexthop", "%pI6", - &nh_node->gate.ipv6); + json_object_string_addf(json_ifp, "nexthop", "%pI6", &nh_node->gate.ipv6); #endif - json_object_array_add(json_arr, json_ifp); + json_object_string_add(json_ifp, "table", type); + json_object_array_add(json_arr, json_ifp); +} + +static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet, void *arg) +{ + struct pim_nexthop_cache *pnc = backet->data; + struct json_pnc_cache_walk_data *cwd = arg; + json_object *json_obj = cwd->json_obj; + struct pim_instance *pim = cwd->pim; + char addr_str[PIM_ADDRSTRLEN]; + struct nexthop *nh_node = NULL; + struct interface *ifp = NULL; + + snprintfrr(addr_str, sizeof(addr_str), "%pPA", &pnc->addr); + for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) { + ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id); + pim_print_json_nexthop(json_obj, nh_node, ifp, addr_str, "MRIB"); + } + + for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) { + ifp = if_lookup_by_index(nh_node->ifindex, 
pim->vrf->vrf_id); + pim_print_json_nexthop(json_obj, nh_node, ifp, addr_str, "URIB"); } return CMD_SUCCESS; } @@ -2967,7 +2977,6 @@ static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet, int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty, pim_addr source, pim_addr group) { - int result = 0; pim_addr vif_source; struct prefix grp; struct pim_nexthop nexthop; @@ -2980,31 +2989,36 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty, #if PIM_IPV == 4 if (pim_is_group_224_4(source)) { - vty_out(vty, - "Invalid argument. Expected Valid Source Address.\n"); + vty_out(vty, "Invalid argument. Expected Valid Source Address.\n"); return CMD_WARNING; } - - if (!pim_is_group_224_4(group)) { - vty_out(vty, - "Invalid argument. Expected Valid Multicast Group Address.\n"); + /* Only require group if source is not provided */ + if (pim_addr_is_any(source) && !pim_is_group_224_4(group)) { + vty_out(vty, "Invalid argument. Expected Valid Multicast Group Address.\n"); return CMD_WARNING; } #endif - if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group)) + /* This call will set vif_source=source, if source is not ANY. Otherwise vif_source + * will be set to the RP address according to the group address. 
If no RP is configured + * for the group, then return 0 and set vif_source to ANY + */ + if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group)) { + vty_out(vty, "(%pPAs, %pPA) --- Nexthop Lookup failed, no RP.\n", &source, &group); return CMD_SUCCESS; + } + pim_addr_to_prefix(&grp, group); memset(&nexthop, 0, sizeof(nexthop)); if (!pim_nht_lookup_ecmp(v->info, &nexthop, vif_source, &grp, false)) { - vty_out(vty, - "Nexthop Lookup failed, no usable routes returned.\n"); + vty_out(vty, "(%pPAs, %pPA) --- Nexthop Lookup failed, no usable routes returned.\n", + &source, &group); return CMD_SUCCESS; } - vty_out(vty, "Group %pFXh --- Nexthop %pPAs Interface %s\n", &grp, + vty_out(vty, "(%pPAs, %pPAs) --- Nexthop %pPAs Interface %s\n", &source, &group, &nexthop.mrib_nexthop_addr, nexthop.interface->name); return CMD_SUCCESS; @@ -3033,19 +3047,16 @@ void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj) cwd.pim = pim; jcwd.pim = pim; - if (uj) { + if (uj) jcwd.json_obj = json_object_new_object(); - } else { - vty_out(vty, "Number of registered addresses: %lu\n", - pim->rpf_hash->count); - } + else + vty_out(vty, "Number of registered addresses: %lu\n", pim->nht_hash->count); if (uj) { - hash_walk(pim->rpf_hash, pim_print_json_pnc_cache_walkcb, - &jcwd); + hash_walk(pim->nht_hash, pim_print_json_pnc_cache_walkcb, &jcwd); vty_json(vty, jcwd.json_obj); } else - hash_walk(pim->rpf_hash, pim_print_vty_pnc_cache_walkcb, &cwd); + hash_walk(pim->nht_hash, pim_print_vty_pnc_cache_walkcb, &cwd); } int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty, From 8156ae97c84ae1cc499f90b45f40384164883a27 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Fri, 25 Oct 2024 20:03:07 +0000 Subject: [PATCH 10/11] tests: Add new pim mrib tests Test mrib overrides and rpf lookup mode changes. 
Signed-off-by: Nathan Bahr --- tests/topotests/pim_mrib/__init__.py | 0 tests/topotests/pim_mrib/r1/frr.conf | 28 ++ tests/topotests/pim_mrib/r2/frr.conf | 28 ++ tests/topotests/pim_mrib/r3/frr.conf | 28 ++ tests/topotests/pim_mrib/r4/frr.conf | 29 ++ tests/topotests/pim_mrib/test_pim_mrib.py | 331 ++++++++++++++++++++++ 6 files changed, 444 insertions(+) create mode 100755 tests/topotests/pim_mrib/__init__.py create mode 100644 tests/topotests/pim_mrib/r1/frr.conf create mode 100644 tests/topotests/pim_mrib/r2/frr.conf create mode 100644 tests/topotests/pim_mrib/r3/frr.conf create mode 100644 tests/topotests/pim_mrib/r4/frr.conf create mode 100644 tests/topotests/pim_mrib/test_pim_mrib.py diff --git a/tests/topotests/pim_mrib/__init__.py b/tests/topotests/pim_mrib/__init__.py new file mode 100755 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/pim_mrib/r1/frr.conf b/tests/topotests/pim_mrib/r1/frr.conf new file mode 100644 index 000000000000..28cf2b2c460f --- /dev/null +++ b/tests/topotests/pim_mrib/r1/frr.conf @@ -0,0 +1,28 @@ +! +hostname r1 +password zebra +log file /tmp/r1-frr.log +! +!debug pim nht +!debug pim nht detail +!debug pim nht rp +! +interface r1-eth0 + ip address 10.0.0.1/24 + ip igmp + ip pim +! +interface r1-eth1 + ip address 10.0.1.1/24 + ip igmp + ip pim +! +ip forwarding +! +ip route 10.0.2.0/24 10.0.0.2 50 +ip route 10.0.3.0/24 10.0.1.3 50 +! +router pim + rpf-lookup-mode mrib-then-urib + rp 10.0.0.1 224.0.0.0/4 +! \ No newline at end of file diff --git a/tests/topotests/pim_mrib/r2/frr.conf b/tests/topotests/pim_mrib/r2/frr.conf new file mode 100644 index 000000000000..3e647f6795d2 --- /dev/null +++ b/tests/topotests/pim_mrib/r2/frr.conf @@ -0,0 +1,28 @@ +! +hostname r2 +password zebra +log file /tmp/r2-frr.log +! +!debug pim nht +!debug pim nht detail +!debug pim nht rp +! +interface r2-eth0 + ip address 10.0.0.2/24 + ip igmp + ip pim +! +interface r2-eth1 + ip address 10.0.2.2/24 + ip igmp + ip pim +! +ip forwarding +! 
+ip route 10.0.1.0/24 10.0.0.1 50 +ip route 10.0.3.0/24 10.0.2.4 50 +! +router pim + rpf-lookup-mode mrib-then-urib + rp 10.0.0.1 224.0.0.0/4 +! \ No newline at end of file diff --git a/tests/topotests/pim_mrib/r3/frr.conf b/tests/topotests/pim_mrib/r3/frr.conf new file mode 100644 index 000000000000..9815484d02d5 --- /dev/null +++ b/tests/topotests/pim_mrib/r3/frr.conf @@ -0,0 +1,28 @@ +! +hostname r3 +password zebra +log file /tmp/r3-frr.log +! +!debug pim nht +!debug pim nht detail +!debug pim nht rp +! +interface r3-eth0 + ip address 10.0.1.3/24 + ip igmp + ip pim +! +interface r3-eth1 + ip address 10.0.3.3/24 + ip igmp + ip pim +! +ip forwarding +! +ip route 10.0.0.0/24 10.0.1.1 50 +ip route 10.0.2.0/24 10.0.3.4 50 +! +router pim + rpf-lookup-mode mrib-then-urib + rp 10.0.0.1 224.0.0.0/4 +! \ No newline at end of file diff --git a/tests/topotests/pim_mrib/r4/frr.conf b/tests/topotests/pim_mrib/r4/frr.conf new file mode 100644 index 000000000000..8432a7a35044 --- /dev/null +++ b/tests/topotests/pim_mrib/r4/frr.conf @@ -0,0 +1,29 @@ +! +hostname r4 +password zebra +log file /tmp/r4-frr.log +! +debug pim nht +debug pim nht detail +debug pim nht rp +debug zebra rib detail +! +interface r4-eth0 + ip address 10.0.2.4/24 + ip igmp + ip pim +! +interface r4-eth1 + ip address 10.0.3.4/24 + ip igmp + ip pim +! +ip forwarding +! +ip route 10.0.0.0/24 10.0.2.2 50 +ip route 10.0.1.0/24 10.0.3.3 50 +! +router pim + rpf-lookup-mode mrib-then-urib + rp 10.0.0.1 224.0.0.0/4 +! 
\ No newline at end of file diff --git a/tests/topotests/pim_mrib/test_pim_mrib.py b/tests/topotests/pim_mrib/test_pim_mrib.py new file mode 100644 index 000000000000..e8f5c596aba4 --- /dev/null +++ b/tests/topotests/pim_mrib/test_pim_mrib.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_pim_mrib.py +# +# Copyright (c) 2024 ATCorp +# Nathan Bahr +# + +import os +import sys +import pytest +from functools import partial + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, get_topogen +from lib.topolog import logger +from lib.pim import ( + verify_pim_rp_info, +) +from lib.common_config import step, write_test_header + +""" +test_pim_mrib.py: Test PIM MRIB overrides and RPF modes +""" + +TOPOLOGY = """ + Test PIM MRIB overrides and RPF modes + + +---+---+ +---+---+ + | | 10.0.0.0/24 | | + + R1 +----------------------+ R2 | + | | .1 .2 | | + +---+---+ r1-eth0 r2-eth0 +---+---+ + .1 | r1-eth1 r2-eth1 | .2 + | | + 10.0.1.0/24 | | 10.0.2.0/24 + | | + .3 | r3-eth0 r4-eth0 | .4 + +---+---+ r3-eth1 r4-eth1 +---+---+ + | | .3 .4 | | + + R3 +----------------------+ R4 | + | | 10.0.3.0/24 | | + +---+---+ +---+---+ +""" + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# Required to instantiate the topology builder class. 
+pytestmark = [pytest.mark.pimd] + + +def build_topo(tgen): + '''Build function''' + + # Create routers + tgen.add_router("r1") + tgen.add_router("r2") + tgen.add_router("r3") + tgen.add_router("r4") + + # Create topology links + tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth0", "r2-eth0") + tgen.add_link(tgen.gears["r1"], tgen.gears["r3"], "r1-eth1", "r3-eth0") + tgen.add_link(tgen.gears["r2"], tgen.gears["r4"], "r2-eth1", "r4-eth0") + tgen.add_link(tgen.gears["r3"], tgen.gears["r4"], "r3-eth1", "r4-eth1") + + +def setup_module(mod): + logger.info("PIM MRIB/RPF functionality:\n {}".format(TOPOLOGY)) + + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.items(): + logger.info("Loading router %s" % rname) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + # Initialize all routers. + tgen.start_router() + for router in router_list.values(): + if router.has_version("<", "4.0"): + tgen.set_error("unsupported version") + + +def teardown_module(mod): + '''Teardown the pytest environment''' + tgen = get_topogen() + tgen.stop_topology() + + +def test_pim_mrib_init(request): + '''Test boot in MRIB-then-URIB with the default MRIB''' + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Verify rp-info using default URIB nexthop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth0", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + +def test_pim_mrib_override(request): + '''Test MRIB override nexthop''' + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Install an MRIB route that has a shorter prefix length and
lower cost. + # In MRIB-then-URIB mode, it should use this route + tgen.routers()["r4"].vtysh_cmd( + ''' + conf term + ip mroute 10.0.0.0/16 10.0.3.3 25 + ''' + ) + + step("Verify rp-info using MRIB nexthop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth1", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + +def test_pim_mrib_prefix_mode(request): + '''Test longer prefix lookup mode''' + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch to longer prefix match, should switch back to the URIB route + # even with the lower cost, the longer prefix match will win because of the mode + tgen.routers()["r4"].vtysh_cmd( + ''' + conf term + router pim + rpf-lookup-mode longer-prefix + ''' + ) + + step("Verify rp-info using URIB nexthop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth0", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + +def test_pim_mrib_dist_mode(request): + '''Test lower distance lookup mode''' + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch to lower distance match, should switch back to the MRIB route + tgen.routers()["r4"].vtysh_cmd( + ''' + conf term + router pim + rpf-lookup-mode lower-distance + ''' + ) + + step("Verify rp-info using MRIB nexthop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth1", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + +def test_pim_mrib_urib_mode(request): + '''Test URIB only
lookup mode''' + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch to urib only match, should switch back to the URIB route + tgen.routers()["r4"].vtysh_cmd( + ''' + conf term + router pim + rpf-lookup-mode urib-only + ''' + ) + + step("Verify rp-info using URIB nexthop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth0", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + +def test_pim_mrib_mrib_mode(request): + '''Test MRIB only lookup mode''' + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Switch to mrib only match, should switch back to the MRIB route + tgen.routers()["r4"].vtysh_cmd( + ''' + conf term + router pim + rpf-lookup-mode mrib-only + ''' + ) + + step("Verify rp-info using MRIB nexthop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "r4-eth1", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + +def test_pim_mrib_mrib_mode_no_route(request): + '''Test MRIB only with no route''' + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Remove the MRIB route, in mrib-only mode, it should switch to no path for the RP + tgen.routers()["r4"].vtysh_cmd( + ''' + conf term + no ip mroute 10.0.0.0/16 10.0.3.3 25 + ''' + ) + + step("Verify rp-info with Unknown next hop") + result = verify_pim_rp_info( + tgen, + None, + "r4", + "224.0.0.0/4", + "Unknown", + "10.0.0.1", + "Static", + False, + "ipv4", + True, + ) + assert result is True, "Testcase 
{} :Failed \n Error: {}".format(tc_name, result) + +def test_memory_leak(): + '''Run the memory leak test and report results.''' + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) From 98a5ff632c90bb27d61d633a1186103a08eedb48 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Tue, 24 Sep 2024 21:53:41 +0000 Subject: [PATCH 11/11] doc: Clean up Multicast RIB documentation Moved it all to PIM section and updated docs for recent changes. Signed-off-by: Nathan Bahr --- doc/developer/grpc.rst | 1 - doc/user/pim.rst | 136 +++++++++++++++++++++++++++++------------ doc/user/zebra.rst | 82 ------------------------- 3 files changed, 96 insertions(+), 123 deletions(-) diff --git a/doc/developer/grpc.rst b/doc/developer/grpc.rst index 4e81adf8b243..62d1594f4cfc 100644 --- a/doc/developer/grpc.rst +++ b/doc/developer/grpc.rst @@ -149,7 +149,6 @@ Below is how to compile and run the program, with the example output: ] }, "frr-zebra:zebra": { - "mcast-rpf-lookup": "mrib-then-urib", "workqueue-hold-timer": 10, "zapi-packets": 1000, "import-kernel-table": { diff --git a/doc/user/pim.rst b/doc/user/pim.rst index 0fe53247b05e..a52a60d59773 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -217,32 +217,47 @@ PIM Routers never do SM over. This command is vrf aware, to configure for a vrf, specify the vrf in the router pim block. -Global Multicast ----------------- +.. clicmd:: rpf-lookup-mode MODE -These commands are valid at the top-level of the configuration (or also per -vrf where indicated), instead of under the 'router pim' submode. + MODE sets the method used to perform RPF lookups. Supported modes: -.. clicmd:: ip multicast rpf-lookup-mode WORD + urib-only + Performs the lookup on the Unicast RIB. The Multicast RIB is never used. - Modify how PIM does RPF lookups in the zebra routing table. 
You can use - these choices: + mrib-only + Performs the lookup on the Multicast RIB. The Unicast RIB is never used. - longer-prefix - Lookup the RPF in both tables using the longer prefix as a match + mrib-then-urib + Tries to perform the lookup on the Multicast RIB. If any route is found, + that route is used. Otherwise, the Unicast RIB is tried. lower-distance - Lookup the RPF in both tables using the lower distance as a match + Performs a lookup on the Multicast RIB and Unicast RIB each. The result + with the lower administrative distance is used; if they're equal, the + Multicast RIB takes precedence. - mrib-only - Lookup in the Multicast RIB only + longer-prefix + Performs a lookup on the Multicast RIB and Unicast RIB each. The result + with the longer prefix length is used; if they're equal, the + Multicast RIB takes precedence. - mrib-then-urib - Lookup in the Multicast RIB then the Unicast Rib, returning first found. - This is the default value for lookup if this command is not entered + The ``mrib-then-urib`` setting is the default behavior if nothing is + configured. If this is the desired behavior, it should be explicitly + configured to make the configuration immune against possible changes in + what the default behavior is. - urib-only - Lookup in the Unicast Rib only. +.. warning:: + + Unreachable routes do not receive special treatment and do not cause + fallback to a second lookup. + +.. _pim-global-configuration: + +Global Multicast +================ + +These commands are valid at the top-level of the configuration (or also per +vrf where indicated), instead of under the 'router pim' submode. .. clicmd:: ip igmp generate-query-once [version (2-3)] @@ -257,6 +272,70 @@ vrf where indicated), instead of under the 'router pim' submode. 'no' form of the command disables the warning generation. This command is vrf aware. To configure per vrf, enter vrf submode. + +.. 
_pim-multicast-rib: + +Multicast RIB Commands +---------------------- + +The Multicast RIB provides a separate table of unicast destinations which +is used for Multicast Reverse Path Forwarding decisions. It is used with +a multicast source's IP address, hence contains not multicast group +addresses but unicast addresses. + +This table is fully separate from the default unicast table. However, +RPF lookup can include the unicast table. + +.. clicmd:: ip mroute PREFIX NEXTHOP [DISTANCE] + + Adds a static route entry to the Multicast RIB. This performs exactly as the + ``ip route`` command, except that it inserts the route in the Multicast RIB + instead of the Unicast RIB. + These routes are only used for RPF lookup and will not be used by zebra for + insertion into the kernel *or* for normal rib processing. As such it is + possible to create weird states with these commands. Use with caution. Most + of the time this will not be necessary. + +.. clicmd:: show [ip|ipv6] rpf + + Prints the entire Multicast RIB. Note that this is independent of the + configured RPF lookup mode, the Multicast RIB may be printed yet not + used at all. + +.. clicmd:: show [ip|ipv6] rpf ADDR + + Performs a Multicast RPF lookup using the Multicast RIB only. + ADDR specifies the multicast source address to look up. Note that this is + independent of the configured RPF lookup mode. + + :: + + > show ip rpf 192.0.2.1 + Routing entry for 192.0.2.0/24 using Multicast RIB + Known via "kernel", distance 0, metric 0, best + * 198.51.100.1, via eth0 + + + Indicates that a multicast source lookup for 192.0.2.1 against the + Multicast RIB would use an entry for 192.0.2.0/24 with a gateway of + 198.51.100.1. + +.. clicmd:: show ip pim [vrf NAME] nexthop-lookup ADDR [GROUP] + + Performs a nexthop lookup according to the configured RPF lookup mode. + This performs the lookup for a given source address, and optionally with + a group address, which may affect the nexthop decision.
+ + :: + + > show ip pim nexthop-lookup 192.0.2.1 + (192.0.2.1, *) --- Nexthop 198.10.10.1 Interface eth1 + + + Indicates that a source lookup for 192.0.2.1 according to the configured RPF + lookup mode would use the gateway address 198.10.10.1 on interface eth1. + + + .. _pim-interface-configuration: PIM Interface Configuration @@ -374,29 +453,6 @@ is in a vrf, enter the interface command with the vrf keyword at the end. :ref:`bfd-pim-peer-config` - -.. _pim-multicast-rib: - -PIM Multicast RIB -================= - -In order to influence Multicast RPF lookup, it is possible to insert -into zebra routes for the Multicast RIB. These routes are only -used for RPF lookup and will not be used by zebra for insertion -into the kernel *or* for normal rib processing. As such it is -possible to create weird states with these commands. Use with -caution. Most of the time this will not be necessary. - -.. clicmd:: ip mroute A.B.C.D/M A.B.C.D (1-255) - - Insert into the Multicast Rib Route A.B.C.D/M with specified nexthop. The - distance can be specified as well if desired. - -.. clicmd:: ip mroute A.B.C.D/M INTERFACE (1-255) - - Insert into the Multicast Rib Route A.B.C.D/M using the specified INTERFACE. - The distance can be specified as well if desired. - .. _msdp-configuration: Multicast Source Discovery Protocol (MSDP) Configuration diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst index 74564c94b9c3..c5af8167c632 100644 --- a/doc/user/zebra.rst +++ b/doc/user/zebra.rst @@ -1123,88 +1123,6 @@ and this section also helps that case. ! ... -.. _multicast-rib-commands: - -Multicast RIB Commands -====================== - -The Multicast RIB provides a separate table of unicast destinations which -is used for Multicast Reverse Path Forwarding decisions. It is used with -a multicast source's IP address, hence contains not multicast group -addresses but unicast addresses. - -This table is fully separate from the default unicast table.
However, -RPF lookup can include the unicast table. - -WARNING: RPF lookup results are non-responsive in this version of FRR, -i.e. multicast routing does not actively react to changes in underlying -unicast topology! - -.. clicmd:: ip multicast rpf-lookup-mode MODE - - - MODE sets the method used to perform RPF lookups. Supported modes: - - urib-only - Performs the lookup on the Unicast RIB. The Multicast RIB is never used. - - mrib-only - Performs the lookup on the Multicast RIB. The Unicast RIB is never used. - - mrib-then-urib - Tries to perform the lookup on the Multicast RIB. If any route is found, - that route is used. Otherwise, the Unicast RIB is tried. - - lower-distance - Performs a lookup on the Multicast RIB and Unicast RIB each. The result - with the lower administrative distance is used; if they're equal, the - Multicast RIB takes precedence. - - longer-prefix - Performs a lookup on the Multicast RIB and Unicast RIB each. The result - with the longer prefix length is used; if they're equal, the - Multicast RIB takes precedence. - - The ``mrib-then-urib`` setting is the default behavior if nothing is - configured. If this is the desired behavior, it should be explicitly - configured to make the configuration immune against possible changes in - what the default behavior is. - -.. warning:: - - Unreachable routes do not receive special treatment and do not cause - fallback to a second lookup. - -.. clicmd:: show [ip|ipv6] rpf ADDR - - Performs a Multicast RPF lookup, as configured with ``ip multicast - rpf-lookup-mode MODE``. ADDR specifies the multicast source address to look - up. - - :: - - > show ip rpf 192.0.2.1 - Routing entry for 192.0.2.0/24 using Unicast RIB - Known via "kernel", distance 0, metric 0, best - * 198.51.100.1, via eth0 - - - Indicates that a multicast source lookup for 192.0.2.1 would use an - Unicast RIB entry for 192.0.2.0/24 with a gateway of 198.51.100.1. - -.. clicmd:: show [ip|ipv6] rpf - - Prints the entire Multicast RIB. 
Note that this is independent of the - configured RPF lookup mode, the Multicast RIB may be printed yet not - used at all. - -.. clicmd:: ip mroute PREFIX NEXTHOP [DISTANCE] - - - Adds a static route entry to the Multicast RIB. This performs exactly as the - ``ip route`` command, except that it inserts the route in the Multicast RIB - instead of the Unicast RIB. - .. _zebra-route-filtering: zebra Route Filtering