[Mux orch] Fix tunnel based NHs for IPv6 and ECMP routes #1711

Merged 1 commit on Apr 20, 2021
38 changes: 30 additions & 8 deletions cfgmgr/tunnelmgr.cpp
@@ -64,21 +64,43 @@ static int cmdIpTunnelRouteAdd(const std::string& pfx, std::string & res)
     // ip route add/replace {{ip prefix}} dev {{tunnel intf}}
     // Replace route if route already exists
     ostringstream cmd;
-    cmd << IP_CMD " route replace "
-        << shellquote(pfx)
-        << " dev "
-        << TUNIF;
+    if (IpPrefix(pfx).isV4())
+    {
+        cmd << IP_CMD " route replace "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+    else
+    {
+        cmd << IP_CMD " -6 route replace "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
 
     return swss::exec(cmd.str(), res);
 }
 
 static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res)
 {
     // ip route del {{ip prefix}} dev {{tunnel intf}}
     ostringstream cmd;
-    cmd << IP_CMD " route del "
-        << shellquote(pfx)
-        << " dev "
-        << TUNIF;
+    if (IpPrefix(pfx).isV4())
+    {
+        cmd << IP_CMD " route del "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
+    else
+    {
+        cmd << IP_CMD " -6 route del "
+            << shellquote(pfx)
+            << " dev "
+            << TUNIF;
+    }
 
     return swss::exec(cmd.str(), res);
 }
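The only difference between the two branches is the -6 family flag, which makes the address family explicit instead of leaving ip to infer it from the prefix. A standalone sketch of the dispatch, not the PR code (the colon test stands in for IpPrefix::isV4(), and tun0 is assumed as the tunnel interface behind TUNIF):

// Standalone sketch: build the kernel command the way the add/del helpers
// above now do, choosing the family flag from the prefix.
#include <iostream>
#include <sstream>
#include <string>

static std::string buildRouteCmd(const std::string& op, const std::string& pfx)
{
    // Stand-in for IpPrefix(pfx).isV4(); the real code parses the prefix.
    bool isV4 = pfx.find(':') == std::string::npos;

    std::ostringstream cmd;
    cmd << "ip " << (isV4 ? "" : "-6 ") << "route " << op << " " << pfx << " dev tun0";
    return cmd.str();
}

int main()
{
    // Assumed sample prefixes, for illustration only.
    std::cout << buildRouteCmd("replace", "10.1.0.32/32") << "\n";   // ip route replace ...
    std::cout << buildRouteCmd("del", "fc02:1000::30/128") << "\n";  // ip -6 route del ...
    return 0;
}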

12 changes: 0 additions & 12 deletions orchagent/muxorch.cpp
@@ -1278,12 +1278,6 @@ void MuxCableOrch::updateMuxState(string portName, string muxState)

 void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)
 {
-    if (!nhKey.ip_address.isV4())
-    {
-        SWSS_LOG_INFO("IPv6 tunnel route add '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
-        return;
-    }
-
     vector<FieldValueTuple> data;
     string key, alias = nhKey.alias;
 
@@ -1299,12 +1293,6 @@ void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)

 void MuxCableOrch::removeTunnelRoute(const NextHopKey &nhKey)
 {
-    if (!nhKey.ip_address.isV4())
-    {
-        SWSS_LOG_INFO("IPv6 tunnel route remove '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
-        return;
-    }
-
     string key, alias = nhKey.alias;
 
     IpPrefix pfx = nhKey.ip_address.to_string();
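With these early returns gone, IPv6 next hops take the same path as IPv4 ones: the bare next-hop address is converted to a host prefix and handed to tunnelmgr. A minimal sketch of that correspondence, assuming the usual host-route lengths (the colon test stands in for real address parsing):

// Minimal sketch: host-route prefix lengths per address family, matching what
// the IpPrefix conversion in addTunnelRoute produces for a bare next-hop IP.
#include <iostream>
#include <string>

int main()
{
    const std::string nexthops[] = {"192.168.0.2", "fc02:1000::30"}; // assumed samples
    for (const std::string& nh : nexthops)
    {
        bool isV4 = nh.find(':') == std::string::npos; // stand-in for isV4()
        std::cout << nh << "/" << (isV4 ? 32 : 128) << "\n";
    }
    return 0;
}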
5 changes: 4 additions & 1 deletion orchagent/neighorch.cpp
@@ -313,7 +313,10 @@ bool NeighOrch::isNextHopFlagSet(const NextHopKey &nexthop, const uint32_t nh_flag)

     auto nhop = m_syncdNextHops.find(nexthop);
 
-    assert(nhop != m_syncdNextHops.end());
+    if (nhop == m_syncdNextHops.end())
+    {
+        return false;
+    }
 
     if (nhop->second.nh_flags & nh_flag)
     {
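Tunnel next hops can now appear and disappear with mux state changes, so a flag query may legitimately hit a missing entry; the assert becomes a graceful "flag not set". A generic sketch of the lookup-before-use pattern:

// Generic sketch of the pattern adopted above: a missing key returns
// "flag not set" instead of aborting the process via assert.
#include <cassert>
#include <cstdint>
#include <map>
#include <string>

static bool isFlagSet(const std::map<std::string, uint32_t>& table,
                      const std::string& key, uint32_t flag)
{
    auto it = table.find(key);
    if (it == table.end())
    {
        return false; // unknown entry: no longer a process-killing assert
    }
    return (it->second & flag) != 0;
}

int main()
{
    std::map<std::string, uint32_t> nextHops{{"192.168.0.2@Vlan1000", 0x1}};
    assert(isFlagSet(nextHops, "192.168.0.2@Vlan1000", 0x1));
    assert(!isFlagSet(nextHops, "192.168.0.3@Vlan1000", 0x1)); // missing key
    return 0;
}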
42 changes: 37 additions & 5 deletions orchagent/routeorch.cpp
@@ -574,7 +574,7 @@ void RouteOrch::doTask(Consumer& consumer)
                  * way is to create loopback interface and then create
                  * route pointing to it, so that we can traps packets to
                  * CPU */
-                if (alias == "eth0" || alias == "docker0" || alias == "tun0" ||
+                if (alias == "eth0" || alias == "docker0" ||
                     alias == "lo" || !alias.compare(0, strlen(LOOPBACK_PREFIX), LOOPBACK_PREFIX))
                 {
                     excp_intfs_flag = true;
@@ -599,10 +599,18 @@ void RouteOrch::doTask(Consumer& consumer)

             if (overlay_nh == false)
             {
+                if (alsv[0] == "tun0" && !(IpAddress(ipv[0]).isZero()))
+                {
+                    alsv[0] = gIntfsOrch->getRouterIntfsAlias(ipv[0]);
+                }
                 nhg_str = ipv[0] + NH_DELIMITER + alsv[0];
 
                 for (uint32_t i = 1; i < ipv.size(); i++)
                 {
+                    if (alsv[i] == "tun0" && !(IpAddress(ipv[i]).isZero()))
+                    {
+                        alsv[i] = gIntfsOrch->getRouterIntfsAlias(ipv[i]);
+                    }
                     nhg_str += NHG_DELIMITER + ipv[i] + NH_DELIMITER + alsv[i];
                 }

@@ -628,6 +636,11 @@ void RouteOrch::doTask(Consumer& consumer)
                 /* add addBlackholeRoute or addRoute support empty nhg */
                 it = consumer.m_toSync.erase(it);
             }
+            /* skip direct routes to tun0 */
+            else if (alsv[0] == "tun0")
+            {
+                it = consumer.m_toSync.erase(it);
+            }
             /* directly connected route to VRF interface which come from kernel */
             else if (!alsv[0].compare(0, strlen(VRF_PREFIX), VRF_PREFIX))
             {
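Taken together, the two routeorch changes normalize kernel routes that point at tun0: a next hop with a concrete IP is re-homed to the router interface that owns that neighbor, while directly connected (zero next hop) tun0 routes are skipped entirely. A sketch under those assumptions, with a hypothetical resolver standing in for gIntfsOrch->getRouterIntfsAlias():

// Sketch of the tun0 normalization above: a next hop reported on "tun0" with
// a concrete IP is re-homed to the owning router interface; zero-IP entries
// (directly connected routes) are the ones routeorch now skips.
#include <iostream>
#include <string>

// Hypothetical stand-in: the real code returns the router interface whose
// subnet contains the next-hop IP.
static std::string resolveRouterIntf(const std::string& /*nhIp*/)
{
    return "Vlan1000";
}

int main()
{
    std::string ip = "fc02:1000::30"; // assumed sample next hop
    std::string alias = "tun0";

    bool isZero = (ip == "0.0.0.0" || ip == "::"); // stand-in for IpAddress::isZero()
    if (alias == "tun0" && !isZero)
    {
        alias = resolveRouterIntf(ip);
    }
    std::cout << ip << "@" << alias << "\n"; // fc02:1000::30@Vlan1000
    return 0;
}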
@@ -1009,6 +1022,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
     vector<sai_object_id_t> next_hop_ids;
     set<NextHopKey> next_hop_set = nexthops.getNextHops();
     std::map<sai_object_id_t, NextHopKey> nhopgroup_members_set;
+    std::map<sai_object_id_t, set<NextHopKey>> nhopgroup_shared_set;
 
     /* Assert each IP address exists in m_syncdNextHops table,
      * and add the corresponding next_hop_id to next_hop_ids. */
@@ -1029,7 +1043,14 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)

         sai_object_id_t next_hop_id = m_neighOrch->getNextHopId(it);
         next_hop_ids.push_back(next_hop_id);
-        nhopgroup_members_set[next_hop_id] = it;
+        if (nhopgroup_members_set.find(next_hop_id) == nhopgroup_members_set.end())
+        {
+            nhopgroup_members_set[next_hop_id] = it;
+        }
+        else
+        {
+            nhopgroup_shared_set[next_hop_id].insert(it);
+        }
     }
 
     sai_attribute_t nhg_attr;
@@ -1103,8 +1124,20 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
         gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER);
 
         // Save the membership into next hop structure
-        next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] =
-                                                                                nhgm_id;
+        if (nhopgroup_shared_set.find(nhid) != nhopgroup_shared_set.end())
+        {
+            auto it = nhopgroup_shared_set[nhid].begin();
+            next_hop_group_entry.nhopgroup_members[*it] = nhgm_id;
+            nhopgroup_shared_set[nhid].erase(it);
+            if (nhopgroup_shared_set[nhid].empty())
+            {
+                nhopgroup_shared_set.erase(nhid);
+            }
+        }
+        else
+        {
+            next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] = nhgm_id;
+        }
     }
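Two different next-hop keys can now resolve to the same SAI next-hop object (for example, two standby mux neighbors both riding the tunnel next hop), and the old single-valued map would silently drop one membership. The map-of-sets parks the extra keys and drains them as members are created. A compact sketch of that bookkeeping:

// Compact sketch: the first key per SAI ID goes to `members`, later keys
// sharing that ID are parked in `shared` and drained one by one.
#include <cassert>
#include <map>
#include <set>
#include <string>

int main()
{
    using Oid = long;
    std::map<Oid, std::string> members;
    std::map<Oid, std::set<std::string>> shared;

    auto record = [&](Oid id, const std::string& key)
    {
        if (members.find(id) == members.end())
            members[id] = key;
        else
            shared[id].insert(key);
    };

    // Assumed example: both standby neighbors resolve to tunnel next hop 0x42.
    record(0x42, "192.168.0.2@Vlan1000");
    record(0x42, "192.168.0.3@Vlan1000");

    assert(members.at(0x42) == "192.168.0.2@Vlan1000");
    assert(shared.at(0x42).count("192.168.0.3@Vlan1000") == 1);

    // Drain one shared key, as the member-creation loop above does.
    auto it = shared[0x42].begin();
    std::string next = *it;
    shared[0x42].erase(it);
    if (shared[0x42].empty()) shared.erase(0x42);
    assert(next == "192.168.0.3@Vlan1000" && shared.empty());
    return 0;
}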

/* Increment the ref_count for the next hops used by the next hop group. */
Expand All @@ -1118,7 +1151,6 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
next_hop_group_entry.ref_count = 0;
m_syncdNextHopGroups[nexthops] = next_hop_group_entry;


return true;
}

39 changes: 39 additions & 0 deletions tests/test_mux.py
@@ -59,6 +59,7 @@ def create_vlan_interface(self, confdb, asicdb, dvs):
fvs = {"NULL": "NULL"}
confdb.create_entry("VLAN_INTERFACE", "Vlan1000", fvs)
confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs)
confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs)

dvs.runcmd("config interface startup Ethernet0")
dvs.runcmd("config interface startup Ethernet4")
@@ -334,6 +335,44 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route):
self.set_mux_state(appdb, "Ethernet4", "active")
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])

ps._del(rtprefix)

# Test IPv6 ECMP routes and start with standby config
self.set_mux_state(appdb, "Ethernet0", "standby")
self.set_mux_state(appdb, "Ethernet4", "standby")

rtprefix = "2020::/64"

dvs_route.check_asicdb_deleted_route_entries([rtprefix])

ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE")

fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), ("ifname", "tun0,tun0")])

ps.set(rtprefix, fvs)

# Check if route was propagated to ASIC DB
rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])

# Check for nexthop group and validate nexthop group member in asic db
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)

# Step: 1 - Change one NH to active and verify ecmp route
self.set_mux_state(appdb, "Ethernet0", "active")
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)

# Step: 2 - Change the other NH to active and verify ecmp route
self.set_mux_state(appdb, "Ethernet4", "active")
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])

# Step: 3 - Change one NH to back to standby and verify ecmp route
self.set_mux_state(appdb, "Ethernet0", "standby")
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)

# Step: 4 - Change the other NH to standby and verify ecmp route
self.set_mux_state(appdb, "Ethernet4", "standby")
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)


def get_expected_sai_qualifiers(self, portlist, dvs_acl):
expected_sai_qualifiers = {