diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index 708dcde4ce5a..e967c246975b 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -720,8 +720,11 @@ VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOr handler_map_.insert(handler_pair(APP_VNET_RT_TUNNEL_TABLE_NAME, &VNetRouteOrch::handleTunnel)); state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); + app_db_ = shared_ptr(new DBConnector("APPL_DB", 0)); + state_vnet_rt_tunnel_table_ = unique_ptr(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); state_vnet_rt_adv_table_ = unique_ptr
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); + monitor_session_producer_ = unique_ptr<Table>
(new Table(app_db_.get(), APP_VNET_MONITOR_TABLE_NAME)); gBfdOrch->attach(this); } @@ -900,6 +903,7 @@ bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, + const string& monitoring, const map& monitors) { SWSS_LOG_ENTER(); @@ -940,7 +944,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP sai_object_id_t nh_id; if (!hasNextHopGroup(vnet, nexthops)) { - setEndpointMonitor(vnet, monitors, nexthops); + setEndpointMonitor(vnet, monitors, nexthops, monitoring, ipPrefix); if (nexthops.getSize() == 1) { NextHopKey nexthop(nexthops.to_string(), true); @@ -957,7 +961,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { if (!addNextHopGroup(vnet, nexthops, vrf_obj)) { - delEndpointMonitor(vnet, nexthops); + delEndpointMonitor(vnet, nexthops, ipPrefix); SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); return false; } @@ -1031,7 +1035,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopKey nexthop(nhg.to_string(), true); vrf_obj->removeTunnelNextHop(nexthop); } - delEndpointMonitor(vnet, nhg); + delEndpointMonitor(vnet, nhg, ipPrefix); } else { @@ -1091,7 +1095,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopKey nexthop(nhg.to_string(), true); vrf_obj->removeTunnelNextHop(nexthop); } - delEndpointMonitor(vnet, nhg); + delEndpointMonitor(vnet, nhg, ipPrefix); } else { @@ -1609,7 +1613,55 @@ void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpo bfd_sessions_.erase(monitor_addr); } -void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops) +void VNetRouteOrch::createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix) +{ + SWSS_LOG_ENTER(); + + 
IpAddress endpoint_addr = endpoint.ip_address;
+    if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end() &&
+        monitor_info_[vnet][ipPrefix].find(endpoint) != monitor_info_[vnet][ipPrefix].end())
+    {
+        SWSS_LOG_NOTICE("Monitoring session for prefix %s endpoint %s already exists", ipPrefix.to_string().c_str(), endpoint_addr.to_string().c_str());
+        return;
+    }
+    else
+    {
+        vector<FieldValueTuple> data;
+        auto *vnet_obj = vnet_orch_->getTypePtr<VNetVxlanVrfObject>(vnet);
+
+        auto overlay_dmac = vnet_obj->getOverlayDMac();
+        string key = ipPrefix.to_string() + ":" + monitor_addr.to_string();
+        FieldValueTuple fvTuple1("packet_type", "vxlan");
+        data.push_back(fvTuple1);
+
+        FieldValueTuple fvTuple3("overlay_dmac", overlay_dmac.to_string());
+        data.push_back(fvTuple3);
+
+        monitor_session_producer_->set(key, data);
+
+        MonitorSessionInfo& info = monitor_info_[vnet][ipPrefix][endpoint];
+        info.monitor = monitor_addr;
+        info.state = MONITOR_SESSION_STATE::MONITOR_SESSION_STATE_DOWN;
+    }
+}
+
+void VNetRouteOrch::removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix)
+{
+    SWSS_LOG_ENTER();
+
+    IpAddress endpoint_addr = endpoint.ip_address;
+    if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end() ||
+        monitor_info_[vnet][ipPrefix].find(endpoint) == monitor_info_[vnet][ipPrefix].end())
+    {
+        SWSS_LOG_NOTICE("Monitor session for prefix %s endpoint %s does not exist", ipPrefix.to_string().c_str(), endpoint_addr.to_string().c_str());
+        return;
+    }
+
+    string key = ipPrefix.to_string() + ":" + monitor_addr.to_string();
+    monitor_session_producer_->del(key);
+    monitor_info_[vnet][ipPrefix].erase(endpoint);
+}
+
+void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map<NextHopKey, IpAddress>& monitors, NextHopGroupKey& nexthops, const string& monitoring, IpPrefix& ipPrefix)
 {
     SWSS_LOG_ENTER();
@@ -1617,31 +1669,62 @@ void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map nhks = nexthops.getNextHops(); + bool is_custom_monitoring = 
false; + if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + is_custom_monitoring = true; + } for (auto nhk: nhks) { IpAddress ip = nhk.ip_address; - if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { - if (--nexthop_info_[vnet][ip].ref_count == 0) + if (is_custom_monitoring) + { + if ( monitor_info_[vnet][ipPrefix].find(nhk) != monitor_info_[vnet][ipPrefix].end()) { - IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; - removeBfdSession(vnet, nhk, monitor_addr); + removeMonitoringSession(vnet, nhk, monitor_info_[vnet][ipPrefix][nhk].monitor, ipPrefix); + } + } + else + { + if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { + if (--nexthop_info_[vnet][ip].ref_count == 0) + { + IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; + { + removeBfdSession(vnet, nhk, monitor_addr); + } + } } } } + if (is_custom_monitoring) + { + monitor_info_[vnet].erase(ipPrefix); + } } void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile) @@ -2024,7 +2107,7 @@ bool VNetRouteOrch::handleTunnel(const Request& request) if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, nhg, op, profile, monitors); + return doRouteTask(vnet_name, ip_pfx, nhg, op, profile, monitoring, monitors); } return true; diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 9c950c265f5e..3d03e22420a5 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -24,6 +24,12 @@ extern sai_object_id_t gVirtualRouterId; +enum class MONITOR_SESSION_STATE +{ + MONITOR_SESSION_STATE_UP, + MONITOR_SESSION_STATE_DOWN, + MONITOR_SESSION_STATE_UNKNOWN, +}; const request_description_t vnet_request_description = { { REQ_T_STRING }, { @@ -339,9 +345,16 @@ struct BfdSessionInfo NextHopKey endpoint; }; +struct MonitorSessionInfo +{ + MONITOR_SESSION_STATE state; + IpAddress monitor; +}; + typedef std::map VNetNextHopGroupInfoTable; typedef std::map 
VNetTunnelRouteTable; typedef std::map BfdSessionTable; +typedef std::map> MonitorSessionTable; typedef std::map VNetEndpointInfoTable; class VNetRouteOrch : public Orch2, public Subject, public Observer @@ -374,8 +387,11 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); - void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops); - void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); + void createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, + const string& monitoring, IpPrefix& ipPrefix); + void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops, IpPrefix& ipPrefix); void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); void addRouteAdvertisement(IpPrefix& ipPrefix, string& profile); @@ -386,6 +402,7 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, + const string& monitoring, const std::map& monitors=std::map()); template @@ -400,9 +417,12 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer std::map syncd_nexthop_groups_; std::map syncd_tunnel_routes_; BfdSessionTable bfd_sessions_; + std::map monitor_info_; std::map nexthop_info_; ProducerStateTable bfd_session_producer_; + unique_ptr
monitor_session_producer_; shared_ptr<DBConnector> state_db_; + shared_ptr<DBConnector> app_db_; unique_ptr<Table>
state_vnet_rt_tunnel_table_; unique_ptr<Table>
state_vnet_rt_adv_table_; }; diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 6b4fc175a322..8a83d59925db 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -546,6 +546,7 @@ class VnetVxlanVrfTunnel(object): ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" + APP_VNET_MONITOR = "VNET_MONITOR_TABLE" def __init__(self): self.tunnel_map_ids = set() @@ -960,7 +961,21 @@ def _access_function(): return True - + def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = prefix + ':' + endpoint + check_object(app_db, self.APP_VNET_MONITOR, key, + { + "packet_type": packet_type, + "overlay_dmac" : overlay_dmac + } + ) + return True + def check_custom_monitor_deleted(self, dvs, prefix, endpoint): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = prefix + ':' + endpoint + check_deleted_object(app_db, self.APP_VNET_MONITOR, key) + class TestVnetOrch(object): def get_vnet_obj(self): @@ -2376,7 +2391,7 @@ def test_vnet_orch_17(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, 'Vnet17', tunnel_name, '10009', "", overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, 'Vnet17', tunnel_name, '10009', "") vnet_obj.check_vnet_entry(dvs, 'Vnet17') vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet17', '10009') @@ -2384,7 +2399,7 @@ def test_vnet_orch_17(self, dvs, testlog): vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3', primary ='9.0.0.1',monitoring='custom', adv_prefix='100.100.1.1/27') + create_vnet_routes(dvs, 
"100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') # default bfd status is down, route should not be programmed in this status vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) @@ -2432,6 +2447,37 @@ def test_vnet_orch_17(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet17') vnet_obj.check_del_vnet_entry(dvs, 'Vnet17') + ''' + Test 18 - Test for vxlan custom monitoring config. + ''' + def test_vnet_orch_18(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_18' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet18', tunnel_name, '10009', "", overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, 'Vnet18') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet18', '10009') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet18', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3',primary ='9.0.0.1',monitoring='custom', adv_prefix='100.100.1.1/27') + + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.1", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.2", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.3", "vxlan", "22:33:33:44:44:66") + + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet18') + + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy():