From 887766302d319b8b080b8f5559e36d95c0b3050f Mon Sep 17 00:00:00 2001 From: bingwang Date: Wed, 27 Apr 2022 03:22:24 -0700 Subject: [PATCH 1/5] Support tunnel traffic QoS remapping Signed-off-by: bingwang --- orchagent/muxorch.cpp | 67 ++++++++- orchagent/orchdaemon.cpp | 3 +- orchagent/qosorch.cpp | 121 +++++++++++++++- orchagent/qosorch.h | 16 +++ orchagent/tunneldecaporch.cpp | 257 +++++++++++++++++++++++++++++----- orchagent/tunneldecaporch.h | 41 ++++-- tests/test_mux.py | 134 +++++++++++++++--- tests/test_tunnel.py | 91 +++++++++++- 8 files changed, 655 insertions(+), 75 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index dd3ed79126..90c4c43978 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -23,6 +23,7 @@ #include "aclorch.h" #include "routeorch.h" #include "fdborch.h" +#include "qosorch.h" /* Global variables */ extern Directory gDirectory; @@ -32,6 +33,7 @@ extern RouteOrch *gRouteOrch; extern AclOrch *gAclOrch; extern PortsOrch *gPortsOrch; extern FdbOrch *gFdbOrch; +extern QosOrch *gQosOrch; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t gUnderlayIfId; @@ -42,7 +44,6 @@ extern sai_next_hop_api_t* sai_next_hop_api; extern sai_router_interface_api_t* sai_router_intfs_api; /* Constants */ -#define MUX_TUNNEL "MuxTunnel0" #define MUX_ACL_TABLE_NAME INGRESS_TABLE_DROP #define MUX_ACL_RULE_NAME "mux_acl_rule" #define MUX_HW_STATE_UNKNOWN "unknown" @@ -162,7 +163,12 @@ static sai_status_t remove_route(IpPrefix &pfx) return status; } -static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* p_src_ip) +static sai_object_id_t create_tunnel( + const IpAddress* p_dst_ip, + const IpAddress* p_src_ip, + sai_object_id_t tc_to_dscp_map_id, + sai_object_id_t tc_to_queue_map_id, + string dscp_mode_name) { sai_status_t status; @@ -206,6 +212,22 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; tunnel_attrs.push_back(attr); + if (dscp_mode_name == "uniform" || dscp_mode_name == "pipe") + { + sai_tunnel_dscp_mode_t dscp_mode; + if (dscp_mode_name == "uniform") + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL; + } + else + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + } + attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE; + attr.value.s32 = dscp_mode; + tunnel_attrs.push_back(attr); + } + attr.id = SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_DROP; tunnel_attrs.push_back(attr); @@ -224,6 +246,22 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* tunnel_attrs.push_back(attr); } + // DSCP rewriting + if (tc_to_dscp_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP; + attr.value.oid = tc_to_dscp_map_id; + tunnel_attrs.push_back(attr); + } + + // TC remapping + if (tc_to_queue_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP; + attr.value.oid = tc_to_queue_map_id; + tunnel_attrs.push_back(attr); + } + sai_object_id_t tunnel_id; status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -1262,7 +1300,30 @@ bool MuxOrch::handlePeerSwitch(const Request& request) auto it = dst_ips.getIpAddresses().begin(); const IpAddress& dst_ip = *it; - mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip); + + // Read dscp_mode of MuxTunnel0 from decap_orch + string dscp_mode_name = decap_orch_->getDscpMode(MUX_TUNNEL); + 
if (dscp_mode_name == "") + { + SWSS_LOG_NOTICE("dscp_mode for tunnel %s is not available. Will not be applied", MUX_TUNNEL); + } + + // Read tc_to_dscp_map_id of MuxTunnel0 from decap_orch + sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; + decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_dscp_field_name, tc_to_dscp_map_id); + if (tc_to_dscp_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("tc_to_dscp_map_id for tunnel %s is not available. Will not be applied", MUX_TUNNEL); + } + // Read tc_to_queue_map_id of MuxTunnel0 from decap_orch + sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; + decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_queue_field_name, tc_to_queue_map_id); + if (tc_to_queue_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("tc_to_queue_map_id for tunnel %s is not available. Will not be applied", MUX_TUNNEL); + } + + mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id, tc_to_queue_map_id, dscp_mode_name); mux_peer_switch_ = peer_ip; SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 190f4e3f25..9dd36000af 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -179,7 +179,8 @@ bool OrchDaemon::init() CFG_WRED_PROFILE_TABLE_NAME, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, - CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME }; QosOrch *qos_orch = new QosOrch(m_configDb, qos_tables); diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 7b2e43e0dd..e5f9fa01a9 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -69,7 +69,9 @@ type_map QosOrch::m_qos_maps = { {CFG_QUEUE_TABLE_NAME, new object_reference_map()}, {CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, new object_reference_map()}, {CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, new object_reference_map()}, - {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()} + {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()}, + {CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()}, + {APP_TUNNEL_DECAP_TABLE_NAME, new object_reference_map()} }; task_process_status QosMapHandler::processWorkItem(Consumer& consumer) @@ -763,6 +765,89 @@ task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer) return pfc_to_queue_handler.processWorkItem(consumer); } +task_process_status QosOrch::handleTcToDscpTable(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + TcToDscpMapHandler tc_to_dscp_handler; + return tc_to_dscp_handler.processWorkItem(consumer); +} + +bool TcToDscpMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, + vector &attributes) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t list_attr; + list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + list_attr.value.qosmap.count = (uint32_t)kfvFieldsValues(tuple).size(); + list_attr.value.qosmap.list = new sai_qos_map_t[list_attr.value.qosmap.count](); + uint32_t ind = 0; + + for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++, ind++) + { + try + { + auto value = stoi(fvValue(*i)); + if (value < 0) + { + SWSS_LOG_ERROR("DSCP value %d is negative", value); + delete[] list_attr.value.qosmap.list; + return false; + } + else if (value > DSCP_MAX_VAL) + { + SWSS_LOG_ERROR("DSCP value %d is greater than max value %d", value, DSCP_MAX_VAL); + delete[] list_attr.value.qosmap.list; + 
return false; + } + list_attr.value.qosmap.list[ind].key.tc = static_cast(stoi(fvField(*i))); + list_attr.value.qosmap.list[ind].value.dscp = static_cast(value); + + SWSS_LOG_DEBUG("key.tc:%d, value.dscp:%d", + list_attr.value.qosmap.list[ind].key.tc, + list_attr.value.qosmap.list[ind].value.dscp); + } + catch(const invalid_argument& e) + { + SWSS_LOG_ERROR("Got exception during conversion: %s", e.what()); + delete[] list_attr.value.qosmap.list; + return false; + } + } + attributes.push_back(list_attr); + return true; +} + +sai_object_id_t TcToDscpMapHandler::addQosItem(const vector &attributes) +{ + SWSS_LOG_ENTER(); + sai_status_t sai_status; + sai_object_id_t sai_object; + vector qos_map_attrs; + + sai_attribute_t qos_map_attr; + qos_map_attr.id = SAI_QOS_MAP_ATTR_TYPE; + qos_map_attr.value.u32 = SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DSCP; + qos_map_attrs.push_back(qos_map_attr); + + qos_map_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + qos_map_attr.value.qosmap.count = attributes[0].value.qosmap.count; + qos_map_attr.value.qosmap.list = attributes[0].value.qosmap.list; + qos_map_attrs.push_back(qos_map_attr); + + sai_status = sai_qos_map_api->create_qos_map(&sai_object, + gSwitchId, + (uint32_t)qos_map_attrs.size(), + qos_map_attrs.data()); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to create tc_to_dscp map. status:%d", sai_status); + return SAI_NULL_OBJECT_ID; + } + SWSS_LOG_DEBUG("created QosMap object:%" PRIx64, sai_object); + return sai_object; +} + QosOrch::QosOrch(DBConnector *db, vector &tableNames) : Orch(db, tableNames) { SWSS_LOG_ENTER(); @@ -786,6 +871,7 @@ void QosOrch::initTableHandlers() m_qos_handler_map.insert(qos_handler_pair(CFG_QUEUE_TABLE_NAME, &QosOrch::handleQueueTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_PORT_QOS_MAP_TABLE_NAME, &QosOrch::handlePortQosMapTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_WRED_PROFILE_TABLE_NAME, &QosOrch::handleWredProfileTable)); + m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DSCP_MAP_TABLE_NAME, &QosOrch::handleTcToDscpTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handleTcToPgTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handlePfcPrioToPgTable)); @@ -1509,3 +1595,36 @@ void QosOrch::doTask(Consumer &consumer) } } } + + +/** + * Function Description: + * @brief Resolve the id of QoS map that is referenced by tunnel + * + * Arguments: + * @param[in] referencing_table_name - The name of table that is referencing the QoS map + * @param[in] tunnle_name - The name of tunnel + * @param[in] map_type_name - The type of referenced QoS map + * @param[in] tuple - The KeyOpFieldsValuesTuple that contains keys - values + * + * Return Values: + * @return The sai_object_id of referenced map, or SAI_NULL_OBJECT_ID if there's an error + */ +sai_object_id_t QosOrch::resolveTunnelQosMap(std::string referencing_table_name, std::string tunnel_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple) +{ + sai_object_id_t id; + string object_name; + ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, tuple, id, object_name); + if (status == ref_resolve_status::success) + { + + setObjectReference(m_qos_maps, referencing_table_name, tunnel_name, map_type_name, object_name); + SWSS_LOG_INFO("Resolved QoS map for table %s tunnel %s type %s name %s", referencing_table_name.c_str(), tunnel_name.c_str(), map_type_name.c_str(), object_name.c_str()); + return id; + 
} + else + { + SWSS_LOG_ERROR("Failed to resolve QoS map for table %s tunnel %s type %s", referencing_table_name.c_str(), tunnel_name.c_str(), map_type_name.c_str()); + return SAI_NULL_OBJECT_ID; + } +} diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 37002be566..d2ebec4db0 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -25,6 +25,10 @@ const string green_min_threshold_field_name = "green_min_threshold"; const string red_drop_probability_field_name = "red_drop_probability"; const string yellow_drop_probability_field_name = "yellow_drop_probability"; const string green_drop_probability_field_name = "green_drop_probability"; +const string decap_dscp_to_tc_field_name = "decap_dscp_to_tc_map"; +const string decap_tc_to_pg_field_name = "decap_tc_to_pg_map"; +const string encap_tc_to_queue_field_name = "encap_tc_to_queue_map"; +const string encap_tc_to_dscp_field_name = "encap_tc_to_dscp_map"; const string wred_profile_field_name = "wred_profile"; const string wred_red_enable_field_name = "wred_red_enable"; @@ -122,6 +126,14 @@ class PfcToQueueHandler : public QosMapHandler sai_object_id_t addQosItem(const vector &attributes); }; +// Handler for TC_TO_DSCP_MAP +class TcToDscpMapHandler : public QosMapHandler +{ +public: + bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) override; + sai_object_id_t addQosItem(const vector &attributes) override; +}; + class QosOrch : public Orch { public: @@ -129,6 +141,9 @@ class QosOrch : public Orch static type_map& getTypeMap(); static type_map m_qos_maps; + + sai_object_id_t resolveTunnelQosMap(std::string referencing_table_name, std::string tunnel_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple); + private: void doTask() override; virtual void doTask(Consumer& consumer); @@ -149,6 +164,7 @@ class QosOrch : public Orch task_process_status handleSchedulerTable(Consumer& consumer); task_process_status handleQueueTable(Consumer& consumer); task_process_status handleWredProfileTable(Consumer& consumer); + task_process_status handleTcToDscpTable(Consumer& consumer); sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id); diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 6ef2c96f74..083ce83e33 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -5,6 +5,7 @@ #include "crmorch.h" #include "logger.h" #include "swssnet.h" +#include "qosorch.h" #define OVERLAY_RIF_DEFAULT_MTU 9100 @@ -17,6 +18,7 @@ extern sai_object_id_t gUnderlayIfId; extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; extern CrmOrch* gCrmOrch; +extern QosOrch* gQosOrch; TunnelDecapOrch::TunnelDecapOrch(DBConnector *db, string tableName) : Orch(db, tableName) { @@ -31,7 +33,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { return; } - + string table_name = consumer.getTableName(); auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -48,10 +50,22 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string ecn_mode; string encap_ecn_mode; string ttl_mode; + sai_object_id_t dscp_to_dc_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; + // The tc_to_dscp_map_id and tc_to_queue_map_id are parsed here for muxorch to retrieve + sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; + bool valid = true; + sai_object_id_t tunnel_id = SAI_NULL_OBJECT_ID; + // checking to see if the tunnel already exists bool 
exists = (tunnelTable.find(key) != tunnelTable.end()); + if (exists) + { + tunnel_id = tunnelTable[key].tunnel_id; + } if (op == SET_COMMAND) { @@ -114,7 +128,8 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), dscp_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), dscp_mode, tunnel_id); + tunnelTable[key].dscp_mode = dscp_mode; } } else if (fvField(i) == "ecn_mode") @@ -128,7 +143,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ecn_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), ecn_mode, tunnel_id); } } else if (fvField(i) == "encap_ecn_mode") @@ -142,7 +157,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), encap_ecn_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), encap_ecn_mode, tunnel_id); } } else if (fvField(i) == "ttl_mode") @@ -156,7 +171,41 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ttl_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), ttl_mode, tunnel_id); + } + } + else if (fvField(i) == decap_dscp_to_tc_field_name) + { + dscp_to_dc_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_dscp_to_tc_field_name, t); + if (exists && dscp_to_dc_map_id != SAI_NULL_OBJECT_ID) + { + setTunnelAttribute(fvField(i), dscp_to_dc_map_id, tunnel_id); + } + } + else if (fvField(i) == decap_tc_to_pg_field_name) + { + tc_to_pg_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_tc_to_pg_field_name, t); + if (exists && tc_to_pg_map_id != SAI_NULL_OBJECT_ID) + { + setTunnelAttribute(fvField(i), tc_to_pg_map_id, tunnel_id); + } + } + else if (fvField(i) == encap_tc_to_dscp_field_name) + { + tc_to_dscp_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_dscp_field_name, t); + if (exists) + { + // Record only + tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; + } + } + else if (fvField(i) == encap_tc_to_queue_field_name) + { + tc_to_queue_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_queue_field_name, t); + if (exists) + { + // Record only + tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; } } } @@ -164,8 +213,12 @@ void TunnelDecapOrch::doTask(Consumer& consumer) // create new tunnel if it doesn't exists already if (valid && !exists) { - if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode)) + if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, + dscp_to_dc_map_id, tc_to_pg_map_id)) { + // Record only + tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; + tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } else @@ -202,22 +255,34 @@ void TunnelDecapOrch::doTask(Consumer& consumer) * @param[in] dscp - dscp mode (uniform/pipe) * @param[in] ecn - ecn mode (copy_from_outer/standard) * @param[in] ttl - ttl mode (uniform/pipe) + * @param[in] dscp_to_tc_map_id - Map ID for remapping DSCP to TC (decap) + * @param[in] tc_to_pg_map_id - Map ID for remapping TC to PG (decap) * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip, IpAddress* p_src_ip, string dscp, string ecn, string encap_ecn, string ttl) 
+bool TunnelDecapOrch::addDecapTunnel( + string key, + string type, + IpAddresses dst_ip, + IpAddress* p_src_ip, + string dscp, + string ecn, + string encap_ecn, + string ttl, + sai_object_id_t dscp_to_tc_map_id, + sai_object_id_t tc_to_pg_map_id) { SWSS_LOG_ENTER(); sai_status_t status; - + IpAddress src_ip("0.0.0.0"); // adding tunnel attributes to array and writing to ASIC_DB sai_attribute_t attr; vector tunnel_attrs; sai_object_id_t overlayIfId; - + TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; // create the overlay router interface to create a LOOPBACK type router interface (decap) vector overlay_intf_attrs; @@ -264,6 +329,8 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; copy(attr.value.ipaddr, p_src_ip->to_string()); tunnel_attrs.push_back(attr); + src_ip = *p_src_ip; + term_type = TUNNEL_TERM_TYPE_P2P; } // decap ecn mode (copy from outer/standard) @@ -312,6 +379,22 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip } tunnel_attrs.push_back(attr); + // DSCP_TO_TC_MAP + if (dscp_to_tc_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; + attr.value.oid = dscp_to_tc_map_id; + tunnel_attrs.push_back(attr); + } + + //TC_TO_PG_MAP + if (tc_to_pg_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; + attr.value.oid = tc_to_pg_map_id; + tunnel_attrs.push_back(attr); + } + // write attributes to ASIC_DB sai_object_id_t tunnel_id; status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); @@ -325,10 +408,10 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip } } - tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {} }; + tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {}, dscp, SAI_NULL_OBJECT_ID, SAI_NULL_OBJECT_ID }; - // create a decap tunnel entry for every ip - if (!addDecapTunnelTermEntries(key, dst_ip, tunnel_id)) + // create a decap tunnel entry for every source_ip - dest_ip pair + if (!addDecapTunnelTermEntries(key, src_ip, dst_ip, tunnel_id, term_type)) { return false; } @@ -340,15 +423,17 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip * Function Description: * @brief adds a decap tunnel termination entry to ASIC_DB * - * Arguments: + * Arguments: * @param[in] tunnelKey - key of the tunnel from APP_DB - * @param[in] dst_ip - destination ip addresses to decap + * @param[in] src_ip - source ip address of decap tunnel + * @param[in] dst_ips - destination ip addresses to decap * @param[in] tunnel_id - the id of the tunnel + * @param[in] term_type - P2P or P2MP. 
Other types (MP2P and MP2MP) not supported yet * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses dst_ip, sai_object_id_t tunnel_id) +bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ips, sai_object_id_t tunnel_id, TunnelTermType tunnel_type) { SWSS_LOG_ENTER(); @@ -361,7 +446,14 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; - attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + { + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P; + } + else + { + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + } tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; @@ -372,19 +464,38 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds attr.value.oid = tunnel_id; tunnel_table_entry_attrs.push_back(attr); + if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + { + // Set src ip for P2P only + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP; + copy(attr.value.ipaddr, src_ip); + tunnel_table_entry_attrs.push_back(attr); + } + TunnelEntry *tunnel_info = &tunnelTable.find(tunnelKey)->second; // loop through the IP list and create a new tunnel table entry for every IP (in network byte order) - set tunnel_ips = dst_ip.getIpAddresses(); + set tunnel_ips = dst_ips.getIpAddresses(); for (auto it = tunnel_ips.begin(); it != tunnel_ips.end(); ++it) { const IpAddress& ia = *it; - string ip = ia.to_string(); + string dst_ip = ia.to_string(); + // The key will be src_ip-dst_ip (like 10.1.1.1-20.2.2.2) if src_ip is not 0, + // or the key will contain dst_ip only + string key; + if (!src_ip.isZero()) + { + key = src_ip.to_string() + '-' + dst_ip; + } + else + { + key = dst_ip; + } - // check if the there's an entry already for the ip - if (existingIps.find(ip) != existingIps.end()) + // check if the there's an entry already for the key pair + if (existingIps.find(key) != existingIps.end()) { - SWSS_LOG_ERROR("%s already exists. Did not create entry.", ip.c_str()); + SWSS_LOG_NOTICE("%s already exists. 
Did not create entry.", key.c_str()); } else { @@ -397,7 +508,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry(&tunnel_term_table_entry_id, gSwitchId, (uint32_t)tunnel_table_entry_attrs.size(), tunnel_table_entry_attrs.data()); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to create tunnel entry table for ip: %s", ip.c_str()); + SWSS_LOG_ERROR("Failed to create tunnel entry table for ip: %s", key.c_str()); task_process_status handle_status = handleSaiCreateStatus(SAI_API_TUNNEL, status); if (handle_status != task_success) { @@ -406,15 +517,15 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds } // insert into ip to entry mapping - existingIps.insert(ip); + existingIps.insert(key); // insert entry id and ip into tunnel mapping - tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, ip }); + tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, src_ip.to_string(), dst_ip, tunnel_type }); // pop the last element for the next loop tunnel_table_entry_attrs.pop_back(); - SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", ip.c_str()); + SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", dst_ip.c_str()); } } @@ -504,6 +615,51 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ return true; } +/** + * Function Description: + * @brief sets attributes for a tunnel (decap_dscp_to_tc_map and decap_tc_to_pg_map are supported) + * + * Arguments: + * @param[in] field - field to set the attribute for + * @param[in] value - value to set the attribute to (sai_object_id) + * @param[in] existing_tunnel_id - the id of the tunnel you want to set the attribute for + * + * Return Values: + * @return true on success and false if there's an error + */ +bool TunnelDecapOrch::setTunnelAttribute(string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id) +{ + + sai_attribute_t attr; + + if (field == decap_dscp_to_tc_field_name) + { + // TC remapping. + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; + attr.value.oid = value; + + } + else if (field == decap_tc_to_pg_field_name) + { + // TC to PG remapping + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; + attr.value.oid = value; + } + + sai_status_t status = sai_tunnel_api->set_tunnel_attribute(existing_tunnel_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set attribute %s with value %" PRIu64, field.c_str(), value); + task_process_status handle_status = handleSaiSetStatus(SAI_API_TUNNEL, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + SWSS_LOG_NOTICE("Set attribute %s with value %" PRIu64, field.c_str(), value); + return true; +} + /** * Function Description: * @brief sets ips for a particular tunnel. 
deletes ips that are old and adds new ones @@ -530,7 +686,7 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s for (auto it = tunnel_term_info_copy.begin(); it != tunnel_term_info_copy.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - string ip = tunnel_entry_info.ip_address; + string ip = tunnel_entry_info.dst_ip; if (!new_ip_addresses.contains(ip)) { if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, ip)) @@ -541,12 +697,12 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s else { // add the data into the tunnel_term_info - tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, ip }); + tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, "0.0.0.0", ip, TUNNEL_TERM_TYPE_P2MP }); } } // add all the new ip addresses - if(!addDecapTunnelTermEntries(key, new_ip_addresses, tunnel_id)) + if(!addDecapTunnelTermEntries(key, IpAddress(0), new_ip_addresses, tunnel_id, TUNNEL_TERM_TYPE_P2MP)) { return false; } @@ -573,7 +729,8 @@ bool TunnelDecapOrch::removeDecapTunnel(string key) for (auto it = tunnel_info->tunnel_term_info.begin(); it != tunnel_info->tunnel_term_info.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, tunnel_entry_info.ip_address)) + string term_key = tunnel_entry_info.src_ip + '-' + tunnel_entry_info.dst_ip; + if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, term_key)) { return false; } @@ -618,7 +775,7 @@ bool TunnelDecapOrch::removeDecapTunnel(string key) * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, string ip) +bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, string key) { sai_status_t status; @@ -634,8 +791,8 @@ bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, } // making sure to remove all instances of the ip address - existingIps.erase(ip); - SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s", ip.c_str()); + existingIps.erase(key); + SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s", key.c_str()); return true; } @@ -803,3 +960,39 @@ IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) return tunnelTable[tunnelKey].dst_ip_addrs; } + + +std::string TunnelDecapOrch::getDscpMode(const std::string &tunnelKey) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_INFO("Tunnel not found %s", tunnelKey.c_str()); + return ""; + } + return iter->second.dscp_mode; +} + +bool TunnelDecapOrch::getQosMapId(const std::string &tunnelKey, const std::string &qos_table_type, sai_object_id_t &oid) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_INFO("Tunnel not found %s", tunnelKey.c_str()); + return false; + } + if (qos_table_type == encap_tc_to_dscp_field_name) + { + oid = iter->second.encap_tc_to_dscp_map_id; + } + else if (qos_table_type == encap_tc_to_queue_field_name) + { + oid = iter->second.encap_tc_to_queue_map_id; + } + else + { + SWSS_LOG_ERROR("Unsupported qos type %s", qos_table_type.c_str()); + return false; + } + return true; +} diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index f7b5f923d9..6d8acebde9 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -9,18 +9,34 @@ #include 
"ipaddress.h" #include "ipaddresses.h" + +enum TunnelTermType +{ + TUNNEL_TERM_TYPE_P2P, + TUNNEL_TERM_TYPE_P2MP +}; + +/* Constants */ +#define MUX_TUNNEL "MuxTunnel0" + + struct TunnelTermEntry { sai_object_id_t tunnel_term_id; - std::string ip_address; + std::string src_ip; + std::string dst_ip; + TunnelTermType term_type; }; struct TunnelEntry { - sai_object_id_t tunnel_id; // tunnel id - sai_object_id_t overlay_intf_id; // overlay interface id - swss::IpAddresses dst_ip_addrs; // destination ip addresses - std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) + sai_object_id_t tunnel_id; // tunnel id + sai_object_id_t overlay_intf_id; // overlay interface id + swss::IpAddresses dst_ip_addrs; // destination ip addresses + std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) + std::string dscp_mode; // dscp_mode, will be used in muxorch + sai_object_id_t encap_tc_to_dscp_map_id; // TC_TO_DSCP map id, will be used in muxorch + sai_object_id_t encap_tc_to_queue_map_id; // TC_TO_QUEUE map id, will be used in muxorch }; struct NexthopTunnel @@ -32,7 +48,10 @@ struct NexthopTunnel /* TunnelTable: key string, tunnel object id */ typedef std::map TunnelTable; -/* ExistingIps: ips that currently have term entries */ +/* + ExistingIps: ips that currently have term entries, + Key in ExistingIps is src_ip-dst_ip +*/ typedef std::unordered_set ExistingIps; /* Nexthop IP to refcount map */ @@ -49,21 +68,23 @@ class TunnelDecapOrch : public Orch sai_object_id_t createNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); swss::IpAddresses getDstIpAddresses(std::string tunnelKey); - + std::string getDscpMode(const std::string &tunnelKey) const; + bool getQosMapId(const std::string &tunnelKey, const std::string &qos_table_type, sai_object_id_t &oid) const; private: TunnelTable tunnelTable; ExistingIps existingIps; TunnelNhs tunnelNhs; bool addDecapTunnel(std::string key, std::string type, swss::IpAddresses dst_ip, swss::IpAddress* p_src_ip, - std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl); + std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl, + sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id); bool removeDecapTunnel(std::string key); - bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id); + bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id, TunnelTermType type); bool removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, std::string ip); bool setTunnelAttribute(std::string field, std::string value, sai_object_id_t existing_tunnel_id); - bool setIpAttribute(std::string key, swss::IpAddresses new_ip_addresses, sai_object_id_t tunnel_id); + bool setTunnelAttribute(std::string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id); sai_object_id_t getNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); int incNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); diff --git a/tests/test_mux.py b/tests/test_mux.py index eb31d2258e..369edd2a80 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -30,6 +30,9 @@ class TestMuxTunnelBase(): STATE_FDB_TABLE = "FDB_TABLE" MUX_TUNNEL_0 = 
"MuxTunnel0" PEER_SWITCH_HOST = "peer_switch_hostname" + CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" + ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" SELF_IPV4 = "10.1.0.32" PEER_IPV4 = "10.1.0.33" @@ -83,6 +86,11 @@ class TestMuxTunnelBase(): "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } + TC_TO_DSCP_MAP = {str(i):str(i) for i in range(0, 8)} + TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} + DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} + TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} + def create_vlan_interface(self, dvs): confdb = dvs.get_config_db() @@ -594,13 +602,18 @@ def check_vr_exists_in_asicdb(self, asicdb, sai_oid): asicdb.wait_for_entry(self.ASIC_VRF_TABLE, sai_oid) return True - def create_and_test_peer(self, asicdb): + def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dscp_map_oid=None, tc_to_queue_map_oid=None): """ Create PEER entry verify all needed enties in ASIC DB exists """ # check asic db table # There will be two tunnels, one P2MP and another P2P tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 2) + if tc_to_dscp_map_oid: + assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP" in fvs + if tc_to_queue_map_oid: + assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP" in fvs + p2p_obj = None for tunnel_sai_obj in tunnels: @@ -634,35 +647,63 @@ def create_and_test_peer(self, asicdb): assert value == "SAI_TUNNEL_TTL_MODE_PIPE_MODEL" elif field == "SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION": assert value == "SAI_PACKET_ACTION_DROP" + elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP": + assert value == tc_to_dscp_map_oid + elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP": + assert value == tc_to_queue_map_oid + elif field == "SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE": + assert value == "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL" else: assert False, "Field %s is not tested" % field - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips): + def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_entries = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TERM_ENTRIES, len(dst_ips)) - + expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + expected_len = 6 if src_ip else 5 for term_entry in tunnel_term_entries: fvs = asicdb.get_entry(self.ASIC_TUNNEL_TERM_ENTRIES, term_entry) - assert len(fvs) == 5 + assert len(fvs) == expected_len for field, value in fvs.items(): if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": assert self.check_vr_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE": - assert value == "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + assert value == expected_term_type elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID": assert value == tunnel_sai_oid elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP": assert value in dst_ips + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP" and src_ip: + assert value == src_ip else: assert False, "Field %s is not tested" % field - def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_params): + def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in tunnel_params + 
is_symmetric_tunnel = "src_ip" in kwargs + + # 6 parameters to check in case of decap tunnel + # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel + expected_len = 7 if is_symmetric_tunnel else 6 + + if 'decap_tc_to_pg_map_id' in kwargs: + expected_len += 1 + decap_tc_to_pg_map_id = kwargs.pop('decap_tc_to_pg_map_id') + + if 'decap_dscp_to_tc_map_id' in kwargs: + expected_len += 1 + decap_dscp_to_tc_map_id = kwargs.pop('decap_dscp_to_tc_map_id') + + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) + fvs = create_fvs(**kwargs) + ps.set(tunnel_name, fvs) + # wait till config will be applied + time.sleep(1) # check asic db table tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 1) @@ -671,19 +712,17 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_params): fvs = asicdb.wait_for_entry(self.ASIC_TUNNEL_TABLE, tunnel_sai_obj) - # 6 parameters to check in case of decap tunnel - # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel - assert len(fvs) == 7 if is_symmetric_tunnel else 6 + assert len(fvs) == expected_len - expected_ecn_mode = self.ecn_modes_map[tunnel_params["ecn_mode"]] - expected_dscp_mode = self.dscp_modes_map[tunnel_params["dscp_mode"]] - expected_ttl_mode = self.ttl_modes_map[tunnel_params["ttl_mode"]] + expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] + expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] + expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == tunnel_params["src_ip"] + assert value == kwargs["src_ip"] elif field == "SAI_TUNNEL_ATTR_DECAP_ECN_MODE": assert value == expected_ecn_mode elif field == "SAI_TUNNEL_ATTR_DECAP_TTL_MODE": @@ -694,12 +733,15 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_params): assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": + assert value == decap_dscp_to_tc_map_id + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": + assert value == decap_tc_to_pg_map_id else: assert False, "Field %s is not tested" % field - + src_ip = kwargs['src_ip'] if 'src_ip' in kwargs else None self.check_tunnel_termination_entry_exists_in_asicdb( - asicdb, tunnel_sai_obj, tunnel_params["dst_ip"].split(",") - ) + asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -727,6 +769,23 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): + current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) + # Apply QoS map to config db + table = swsscommon.Table(configdb.db_connection, qos_map_type_name) + fvs = swsscommon.FieldValuePairs(list(qos_map.items())) + table.set(qos_map_name, fvs) + time.sleep(1) + + diff = set(asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY)) - set(current_oids) + assert len(diff) == 1 + oid = diff.pop() + return oid + + def remove_qos_map(self, configdb, qos_map_type_name, qos_map_oid): + """ Remove the 
testing qos map""" + table = swsscommon.Table(configdb.db_connection, qos_map_type_name) + table._del(qos_map_oid) def check_app_db_neigh_table( self, appdb, intf, neigh_ip, mac="00:00:00:00:00:00", expect_entry=True @@ -918,6 +977,24 @@ def intf_fdb_map(self, dvs, setup_vlan): class TestMuxTunnel(TestMuxTunnelBase): """ Tests for Mux tunnel creation and removal """ + @pytest.fixture(scope='class') + def setup(self, dvs): + db = dvs.get_config_db() + asicdb = dvs.get_asic_db() + + tc_to_dscp_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) + tc_to_queue_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) + + dscp_to_tc_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + + yield tc_to_dscp_map_oid, tc_to_queue_map_oid, dscp_to_tc_map_oid, tc_to_pg_map_oid + + self.remove_qos_map(db, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, tc_to_dscp_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, tc_to_queue_map_oid) + self.remove_qos_map(db, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) + def test_Tunnel(self, dvs, setup_tunnel, testlog): """ test IPv4 Mux tunnel creation """ @@ -926,16 +1003,27 @@ def test_Tunnel(self, dvs, setup_tunnel, testlog): asicdb = dvs.get_asic_db() #self.cleanup_left_over(db, asicdb) - + _, _, dscp_to_tc_map_oid, tc_to_pg_map_oid = setup # create tunnel IPv4 tunnel - self.create_and_test_tunnel(db, asicdb, self.MUX_TUNNEL_0, self.DEFAULT_TUNNEL_PARAMS) - - def test_Peer(self, dvs, setup_peer_switch, testlog): + self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", + src_ip="10.1.0.33", dst_ip="10.1.0.32", dscp_mode="pipe", + ecn_mode="standard", ttl_mode="pipe", + encap_tc_to_queue_map=self.TUNNEL_QOS_MAP_NAME, + encap_tc_to_dscp_map=self.TUNNEL_QOS_MAP_NAME, + decap_dscp_to_tc_map=self.TUNNEL_QOS_MAP_NAME, + decap_dscp_to_tc_map_id = dscp_to_tc_map_oid, + decap_tc_to_pg_map=self.TUNNEL_QOS_MAP_NAME, + decap_tc_to_pg_map_id=tc_to_pg_map_oid) + + def test_Peer(self, dvs, setup_peer_switch, setup): """ test IPv4 Mux tunnel creation """ - + + db = dvs.get_config_db() asicdb = dvs.get_asic_db() - self.create_and_test_peer(asicdb) + encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id, _, _ = setup + + self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32", encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id) def test_Neighbor(self, dvs, dvs_route, testlog): """ test Neighbor entries and mux state change """ diff --git a/tests/test_tunnel.py b/tests/test_tunnel.py index b69e6b6b73..9f09eb2870 100644 --- a/tests/test_tunnel.py +++ b/tests/test_tunnel.py @@ -14,6 +14,9 @@ class TestTunnelBase(object): ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" + CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" ecn_modes_map = { "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", @@ -30,6 
+33,9 @@ class TestTunnelBase(object): "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } + # Define 2 dummy maps + DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} + TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} def check_interface_exists_in_asicdb(self, asicdb, sai_oid): if_table = swsscommon.Table(asicdb, self.ASIC_RIF_TABLE) @@ -41,12 +47,15 @@ def check_vr_exists_in_asicdb(self, asicdb, sai_oid): status, fvs = vfr_table.get(sai_oid) return status - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips): + def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES) tunnel_term_entries = tunnel_term_table.getKeys() assert len(tunnel_term_entries) == len(dst_ips) + expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + expected_len = 6 if src_ip else 5 + for term_entry in tunnel_term_entries: status, fvs = tunnel_term_table.get(term_entry) @@ -57,13 +66,15 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": assert self.check_vr_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE": - assert value == "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + assert value == expected_term_type elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID": assert value == tunnel_sai_oid elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP": assert value in dst_ips + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP" and src_ip: + assert value == src_ip else: assert False, "Field %s is not tested" % field @@ -72,6 +83,17 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): is_symmetric_tunnel = "src_ip" in kwargs; + is_symmetric_tunnel = "src_ip" in kwargs + + decap_dscp_to_tc_map_oid = None + decap_tc_to_pg_map_oid = None + + if "decap_dscp_to_tc_map_oid" in kwargs: + decap_dscp_to_tc_map_oid = kwargs.pop("decap_dscp_to_tc_map_oid") + + if "decap_tc_to_pg_map_oid" in kwargs: + decap_tc_to_pg_map_oid = kwargs.pop("decap_tc_to_pg_map_oid") + # create tunnel entry in DB ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) @@ -95,12 +117,19 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert status == True # 6 parameters to check in case of decap tunnel # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel - assert len(fvs) == 7 if is_symmetric_tunnel else 6 + expected_len = 7 if is_symmetric_tunnel else 6 expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] + if decap_dscp_to_tc_map_oid: + expected_len += 1 + if decap_tc_to_pg_map_oid: + expected_len += 1 + + assert len(fvs) == expected_len + for field, value in fvs: if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" @@ -116,10 +145,14 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": + assert value == 
decap_dscp_to_tc_map_oid + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": + assert value == decap_tc_to_pg_map_oid else: assert False, "Field %s is not tested" % field - - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(",")) + src_ip = kwargs["src_ip"] if "src_ip" in kwargs else None + self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -147,7 +180,27 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): + """ Add qos map for testing""" + qos_table = swsscommon.Table(asicdb, self.ASIC_QOS_MAP_TABLE_KEY) + current_oids = qos_table.getKeys() + # Apply QoS map to config db + table = swsscommon.Table(configdb, qos_map_type_name) + fvs = swsscommon.FieldValuePairs(list(qos_map.items())) + table.set(qos_map_name, fvs) + time.sleep(1) + + diff = set(qos_table.getKeys()) - set(current_oids) + assert len(diff) == 1 + oid = diff.pop() + return oid + + def remove_qos_map(self, configdb, qos_map_type_name, qos_map_oid): + """ Remove the testing qos map""" + table = swsscommon.Table(configdb, qos_map_type_name) + table._del(qos_map_oid) + def cleanup_left_over(self, db, asicdb): """ Cleanup APP and ASIC tables """ @@ -195,7 +248,35 @@ def test_TunnelDecap_v6(self, dvs, testlog): ecn_mode="copy_from_outer", ttl_mode="uniform") self.remove_and_test_tunnel(db, asicdb,"IPINIPv6Decap") +def test_TunnelDecap_MuxTunnel(self, dvs, testlog): + """ Test MuxTunnel creation. 
""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, asicdb) + dscp_to_tc_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + + # Create MuxTunnel0 with QoS remapping attributes + params = { + "tunnel_type": "IPINIP", + "src_ip": "1.1.1.1", + "dst_ip": "1.1.1.2", + "dscp_mode": "pipe", + "ecn_mode": "copy_from_outer", + "ttl_mode": "uniform", + "decap_dscp_to_tc_map": "AZURE_TUNNEL", + "decap_dscp_to_tc_map_oid": dscp_to_tc_map_oid, + "decap_tc_to_pg_map": "AZURE_TUNNEL", + "decap_tc_to_pg_map_oid": tc_to_pg_map_oid + } + self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) + + self.remove_qos_map(configdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) + self.remove_qos_map(configdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) + class TestSymmetricTunnel(TestTunnelBase): """ Tests for symmetric tunnel creation and removal """ From 04933ce496f51ac5ea69841b4d0c46049733b862 Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 28 Apr 2022 03:35:57 +0000 Subject: [PATCH 2/5] Fix build error Signed-off-by: bingwang --- orchagent/orchdaemon.cpp | 5 +++-- orchagent/qosorch.cpp | 3 +++ orchagent/tunneldecaporch.h | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 9dd36000af..56651b04ad 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -39,6 +39,7 @@ SwitchOrch *gSwitchOrch; Directory gDirectory; NatOrch *gNatOrch; BfdOrch *gBfdOrch; +QosOrch *gQosOrch; bool gIsNatSupported = false; @@ -182,7 +183,7 @@ bool OrchDaemon::init() CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, CFG_TC_TO_DSCP_MAP_TABLE_NAME }; - QosOrch *qos_orch = new QosOrch(m_configDb, qos_tables); + gQosOrch = new QosOrch(m_configDb, qos_tables); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, @@ -275,7 +276,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. 
* For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gIntfsOrch, gNeighOrch, gRouteOrch, copp_orch, qos_orch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, debug_counter_orch, mux_orch, mux_cb_orch, gBfdOrch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gIntfsOrch, gNeighOrch, gRouteOrch, copp_orch, gQosOrch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, debug_counter_orch, mux_orch, mux_cb_orch, gBfdOrch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index e5f9fa01a9..b5c79fae15 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -74,6 +74,9 @@ type_map QosOrch::m_qos_maps = { {APP_TUNNEL_DECAP_TABLE_NAME, new object_reference_map()} }; +#define DSCP_MAX_VAL 63 +#define EXP_MAX_VAL 7 + task_process_status QosMapHandler::processWorkItem(Consumer& consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index 6d8acebde9..04d7928e37 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -85,6 +85,7 @@ class TunnelDecapOrch : public Orch bool setTunnelAttribute(std::string field, std::string value, sai_object_id_t existing_tunnel_id); bool setTunnelAttribute(std::string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id); + bool setIpAttribute(std::string key, swss::IpAddresses new_ip_addresses, sai_object_id_t tunnel_id); sai_object_id_t getNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); int incNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); From 7b6eda898e5a19f79995cf57c90b1147a6c1e14d Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 28 Apr 2022 08:12:37 +0000 Subject: [PATCH 3/5] Fix vstest Signed-off-by: bingwang --- orchagent/tunneldecaporch.cpp | 72 +++++++++++++++----- tests/test_mux.py | 125 ++++++++++++++++------------------ tests/test_tunnel.py | 10 ++- 3 files changed, 118 insertions(+), 89 deletions(-) diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 083ce83e33..2f22802227 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -50,14 +50,13 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string ecn_mode; string encap_ecn_mode; string ttl_mode; - sai_object_id_t dscp_to_dc_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t dscp_to_tc_map_id = SAI_NULL_OBJECT_ID; sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; // The tc_to_dscp_map_id and tc_to_queue_map_id are parsed here for muxorch to retrieve sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; - bool valid = true; - + task_process_status task_status = task_process_status::task_success; sai_object_id_t tunnel_id = SAI_NULL_OBJECT_ID; // checking to see if the tunnel already exists @@ -78,7 +77,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (tunnel_type != "IPINIP") { SWSS_LOG_ERROR("Invalid tunnel type %s", tunnel_type.c_str()); - valid = false; + task_status = task_process_status::task_invalid_entry; break; } } @@ -91,7 +90,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) catch (const std::invalid_argument &e) { SWSS_LOG_ERROR("%s", e.what()); - valid = false; + task_status = task_process_status::task_invalid_entry; break; } if (exists) @@ -109,7 +108,7 @@ void 
TunnelDecapOrch::doTask(Consumer& consumer) catch (const std::invalid_argument &e) { SWSS_LOG_ERROR("%s", e.what()); - valid = false; + task_status = task_process_status::task_invalid_entry; break; } if (exists) @@ -123,7 +122,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (dscp_mode != "uniform" && dscp_mode != "pipe") { SWSS_LOG_ERROR("Invalid dscp mode %s\n", dscp_mode.c_str()); - valid = false; + task_status = task_process_status::task_invalid_entry; break; } if (exists) @@ -138,7 +137,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (ecn_mode != "copy_from_outer" && ecn_mode != "standard") { SWSS_LOG_ERROR("Invalid ecn mode %s\n", ecn_mode.c_str()); - valid = false; + task_status = task_process_status::task_invalid_entry; break; } if (exists) @@ -152,7 +151,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (encap_ecn_mode != "standard") { SWSS_LOG_ERROR("Only standard encap ecn mode is supported currently %s\n", ecn_mode.c_str()); - valid = false; + task_status = task_process_status::task_invalid_entry; break; } if (exists) @@ -166,7 +165,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (ttl_mode != "uniform" && ttl_mode != "pipe") { SWSS_LOG_ERROR("Invalid ttl mode %s\n", ttl_mode.c_str()); - valid = false; + task_status = task_process_status::task_invalid_entry; break; } if (exists) @@ -176,15 +175,27 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } else if (fvField(i) == decap_dscp_to_tc_field_name) { - dscp_to_dc_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_dscp_to_tc_field_name, t); - if (exists && dscp_to_dc_map_id != SAI_NULL_OBJECT_ID) + dscp_to_tc_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_dscp_to_tc_field_name, t); + if (dscp_to_tc_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", decap_dscp_to_tc_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } + if (exists && dscp_to_tc_map_id != SAI_NULL_OBJECT_ID) { - setTunnelAttribute(fvField(i), dscp_to_dc_map_id, tunnel_id); + setTunnelAttribute(fvField(i), dscp_to_tc_map_id, tunnel_id); } } else if (fvField(i) == decap_tc_to_pg_field_name) { tc_to_pg_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_tc_to_pg_field_name, t); + if (tc_to_pg_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", decap_tc_to_pg_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } if (exists && tc_to_pg_map_id != SAI_NULL_OBJECT_ID) { setTunnelAttribute(fvField(i), tc_to_pg_map_id, tunnel_id); @@ -193,6 +204,12 @@ void TunnelDecapOrch::doTask(Consumer& consumer) else if (fvField(i) == encap_tc_to_dscp_field_name) { tc_to_dscp_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_dscp_field_name, t); + if (tc_to_dscp_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", encap_tc_to_dscp_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } if (exists) { // Record only @@ -202,6 +219,12 @@ void TunnelDecapOrch::doTask(Consumer& consumer) else if (fvField(i) == encap_tc_to_queue_field_name) { tc_to_queue_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_queue_field_name, t); + if (tc_to_queue_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", encap_tc_to_queue_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } if (exists) { // Record only @@ -209,20 +232,22 @@ void 
TunnelDecapOrch::doTask(Consumer& consumer) } } } - + // create new tunnel if it doesn't exists already - if (valid && !exists) + if (task_status == task_process_status::task_success && !exists) { if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, - dscp_to_dc_map_id, tc_to_pg_map_id)) + dscp_to_tc_map_id, tc_to_pg_map_id)) { // Record only tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; + task_status = task_process_status::task_success; SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } else { + task_status = task_process_status::task_failed; SWSS_LOG_ERROR("Failed to add tunnels to ASIC_DB."); } } @@ -240,7 +265,20 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } } - it = consumer.m_toSync.erase(it); + switch (task_status) + { + case task_process_status::task_failed: + case task_process_status::task_success: + case task_process_status::task_invalid_entry: + it = consumer.m_toSync.erase(it); + break; + case task_process_status::task_need_retry: + ++it; + break; + default: + it = consumer.m_toSync.erase(it); + break; + } } } diff --git a/tests/test_mux.py b/tests/test_mux.py index 369edd2a80..e4c7a008df 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -59,12 +59,21 @@ class TestMuxTunnelBase(): SAI_ROUTER_INTERFACE_ATTR_TYPE = "SAI_ROUTER_INTERFACE_ATTR_TYPE" SAI_ROUTER_INTERFACE_TYPE_VLAN = "SAI_ROUTER_INTERFACE_TYPE_VLAN" + TC_TO_DSCP_MAP = {str(i):str(i) for i in range(0, 8)} + TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} + DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} + TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} + DEFAULT_TUNNEL_PARAMS = { "tunnel_type": "IPINIP", "dst_ip": SELF_IPV4, - "dscp_mode": "uniform", + "dscp_mode": "pipe", "ecn_mode": "standard", - "ttl_mode": "pipe" + "ttl_mode": "pipe", + "decap_tc_to_pg_map": "[" + swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME + "|" + TUNNEL_QOS_MAP_NAME + "]", + "decap_dscp_to_tc_map": "[" + swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME + "|" + TUNNEL_QOS_MAP_NAME + "]", + "encap_tc_to_queue_map": "[" + swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME + "|" + TUNNEL_QOS_MAP_NAME + "]", + "encap_tc_to_dscp_map": "[" + swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME + "|" + TUNNEL_QOS_MAP_NAME + "]" } DEFAULT_PEER_SWITCH_PARAMS = { @@ -86,11 +95,6 @@ class TestMuxTunnelBase(): "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } - TC_TO_DSCP_MAP = {str(i):str(i) for i in range(0, 8)} - TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} - DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} - TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} - def create_vlan_interface(self, dvs): confdb = dvs.get_config_db() @@ -609,11 +613,6 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dsc # There will be two tunnels, one P2MP and another P2P tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 2) - if tc_to_dscp_map_oid: - assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP" in fvs - if tc_to_queue_map_oid: - assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP" in fvs - p2p_obj = None for tunnel_sai_obj in tunnels: @@ -630,6 +629,11 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip, tc_to_dsc fvs = asicdb.wait_for_entry(self.ASIC_TUNNEL_TABLE, p2p_obj) + if tc_to_dscp_map_oid: + assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP" in fvs + if tc_to_queue_map_oid: + assert 
"SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP" in fvs + for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" @@ -681,29 +685,20 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid else: assert False, "Field %s is not tested" % field - def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): + def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_param): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in kwargs + is_symmetric_tunnel = "src_ip" in tunnel_param # 6 parameters to check in case of decap tunnel # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel expected_len = 7 if is_symmetric_tunnel else 6 - if 'decap_tc_to_pg_map_id' in kwargs: + if 'decap_tc_to_pg_map_id' in tunnel_param: expected_len += 1 - decap_tc_to_pg_map_id = kwargs.pop('decap_tc_to_pg_map_id') - if 'decap_dscp_to_tc_map_id' in kwargs: + if 'decap_dscp_to_tc_map_id' in tunnel_param: expected_len += 1 - decap_dscp_to_tc_map_id = kwargs.pop('decap_dscp_to_tc_map_id') - - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - fvs = create_fvs(**kwargs) - ps.set(tunnel_name, fvs) - # wait till config will be applied - time.sleep(1) # check asic db table tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 1) @@ -714,15 +709,15 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert len(fvs) == expected_len - expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] - expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] - expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] + expected_ecn_mode = self.ecn_modes_map[tunnel_param["ecn_mode"]] + expected_dscp_mode = self.dscp_modes_map[tunnel_param["dscp_mode"]] + expected_ttl_mode = self.ttl_modes_map[tunnel_param["ttl_mode"]] for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == kwargs["src_ip"] + assert value == tunnel_param["src_ip"] elif field == "SAI_TUNNEL_ATTR_DECAP_ECN_MODE": assert value == expected_ecn_mode elif field == "SAI_TUNNEL_ATTR_DECAP_TTL_MODE": @@ -734,14 +729,14 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": - assert value == decap_dscp_to_tc_map_id + assert value == tunnel_param["decap_dscp_to_tc_map_id"] elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": - assert value == decap_tc_to_pg_map_id + assert value == tunnel_param["decap_tc_to_pg_map_id"] else: assert False, "Field %s is not tested" % field - src_ip = kwargs['src_ip'] if 'src_ip' in kwargs else None + src_ip = tunnel_param['src_ip'] if 'src_ip' in tunnel_param else None self.check_tunnel_termination_entry_exists_in_asicdb( - asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) + asicdb, tunnel_sai_obj, tunnel_param["dst_ip"].split(","), src_ip) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -868,6 +863,24 @@ def setup_mux_cable(self, dvs): config_db = dvs.get_config_db() self.create_mux_cable(config_db) + @pytest.fixture(scope='module') + def setup_qos_map(self, dvs): + db = dvs.get_config_db() + asicdb = dvs.get_asic_db() + + 
tc_to_dscp_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) + tc_to_queue_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) + + dscp_to_tc_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + + yield tc_to_dscp_map_oid, tc_to_queue_map_oid, dscp_to_tc_map_oid, tc_to_pg_map_oid + + self.remove_qos_map(db, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, tc_to_dscp_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, tc_to_queue_map_oid) + self.remove_qos_map(db, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) + @pytest.fixture(scope='module') def setup_tunnel(self, dvs): app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -977,51 +990,31 @@ def intf_fdb_map(self, dvs, setup_vlan): class TestMuxTunnel(TestMuxTunnelBase): """ Tests for Mux tunnel creation and removal """ - @pytest.fixture(scope='class') - def setup(self, dvs): - db = dvs.get_config_db() - asicdb = dvs.get_asic_db() - - tc_to_dscp_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) - tc_to_queue_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) - - dscp_to_tc_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) - tc_to_pg_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) - - yield tc_to_dscp_map_oid, tc_to_queue_map_oid, dscp_to_tc_map_oid, tc_to_pg_map_oid - - self.remove_qos_map(db, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, tc_to_dscp_map_oid) - self.remove_qos_map(db, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, tc_to_queue_map_oid) - self.remove_qos_map(db, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) - self.remove_qos_map(db, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) - - - def test_Tunnel(self, dvs, setup_tunnel, testlog): + def test_Tunnel(self, dvs, setup_tunnel, setup_qos_map, testlog): """ test IPv4 Mux tunnel creation """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() #self.cleanup_left_over(db, asicdb) - _, _, dscp_to_tc_map_oid, tc_to_pg_map_oid = setup + _, _, dscp_to_tc_map_oid, tc_to_pg_map_oid = setup_qos_map + test_param = self.DEFAULT_TUNNEL_PARAMS.copy() + test_param.update( + { + "decap_dscp_to_tc_map_id": dscp_to_tc_map_oid, + "decap_tc_to_pg_map_id": tc_to_pg_map_oid + } + ) # create tunnel IPv4 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", - src_ip="10.1.0.33", dst_ip="10.1.0.32", dscp_mode="pipe", - ecn_mode="standard", ttl_mode="pipe", - encap_tc_to_queue_map=self.TUNNEL_QOS_MAP_NAME, - encap_tc_to_dscp_map=self.TUNNEL_QOS_MAP_NAME, - decap_dscp_to_tc_map=self.TUNNEL_QOS_MAP_NAME, - decap_dscp_to_tc_map_id = dscp_to_tc_map_oid, - decap_tc_to_pg_map=self.TUNNEL_QOS_MAP_NAME, - 
decap_tc_to_pg_map_id=tc_to_pg_map_oid) - - def test_Peer(self, dvs, setup_peer_switch, setup): + self.create_and_test_tunnel(db, asicdb, self.MUX_TUNNEL_0, test_param) + + def test_Peer(self, dvs, setup_peer_switch, setup_tunnel, setup_qos_map, testlog): """ test IPv4 Mux tunnel creation """ db = dvs.get_config_db() asicdb = dvs.get_asic_db() - encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id, _, _ = setup + encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id, _, _ = setup_qos_map self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32", encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id) diff --git a/tests/test_tunnel.py b/tests/test_tunnel.py index 9f09eb2870..ac02ba90b5 100644 --- a/tests/test_tunnel.py +++ b/tests/test_tunnel.py @@ -60,7 +60,7 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid status, fvs = tunnel_term_table.get(term_entry) assert status == True - assert len(fvs) == 5 + assert len(fvs) == expected_len for field, value in fvs: if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": @@ -81,8 +81,6 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in kwargs; - is_symmetric_tunnel = "src_ip" in kwargs decap_dscp_to_tc_map_oid = None @@ -248,7 +246,7 @@ def test_TunnelDecap_v6(self, dvs, testlog): ecn_mode="copy_from_outer", ttl_mode="uniform") self.remove_and_test_tunnel(db, asicdb,"IPINIPv6Decap") -def test_TunnelDecap_MuxTunnel(self, dvs, testlog): + def test_TunnelDecap_MuxTunnel(self, dvs, testlog): """ Test MuxTunnel creation. """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -267,9 +265,9 @@ def test_TunnelDecap_MuxTunnel(self, dvs, testlog): "dscp_mode": "pipe", "ecn_mode": "copy_from_outer", "ttl_mode": "uniform", - "decap_dscp_to_tc_map": "AZURE_TUNNEL", + "decap_dscp_to_tc_map": "[" + swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME + "|AZURE_TUNNEL]", "decap_dscp_to_tc_map_oid": dscp_to_tc_map_oid, - "decap_tc_to_pg_map": "AZURE_TUNNEL", + "decap_tc_to_pg_map": "[" + swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME + "|AZURE_TUNNEL]", "decap_tc_to_pg_map_oid": tc_to_pg_map_oid } self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) From 9ad4f7baa1c4483ef47d539e7aee47b456b9985d Mon Sep 17 00:00:00 2001 From: bingwang Date: Mon, 9 May 2022 12:22:14 +0000 Subject: [PATCH 4/5] Improve retry logic Signed-off-by: bingwang --- orchagent/tunneldecaporch.cpp | 38 +++++++++++++++-------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 2f22802227..4897836a04 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -50,6 +50,8 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string ecn_mode; string encap_ecn_mode; string ttl_mode; + bool valid = true; + sai_object_id_t dscp_to_tc_map_id = SAI_NULL_OBJECT_ID; sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; // The tc_to_dscp_map_id and tc_to_queue_map_id are parsed here for muxorch to retrieve @@ -77,7 +79,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (tunnel_type != "IPINIP") { SWSS_LOG_ERROR("Invalid tunnel type %s", tunnel_type.c_str()); - task_status = task_process_status::task_invalid_entry; + valid = false; break; } } @@ -90,7 
+92,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) catch (const std::invalid_argument &e) { SWSS_LOG_ERROR("%s", e.what()); - task_status = task_process_status::task_invalid_entry; + valid = false; break; } if (exists) @@ -108,7 +110,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) catch (const std::invalid_argument &e) { SWSS_LOG_ERROR("%s", e.what()); - task_status = task_process_status::task_invalid_entry; + valid = false; break; } if (exists) @@ -122,7 +124,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (dscp_mode != "uniform" && dscp_mode != "pipe") { SWSS_LOG_ERROR("Invalid dscp mode %s\n", dscp_mode.c_str()); - task_status = task_process_status::task_invalid_entry; + valid = false; break; } if (exists) @@ -137,7 +139,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (ecn_mode != "copy_from_outer" && ecn_mode != "standard") { SWSS_LOG_ERROR("Invalid ecn mode %s\n", ecn_mode.c_str()); - task_status = task_process_status::task_invalid_entry; + valid = false; break; } if (exists) @@ -151,7 +153,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (encap_ecn_mode != "standard") { SWSS_LOG_ERROR("Only standard encap ecn mode is supported currently %s\n", ecn_mode.c_str()); - task_status = task_process_status::task_invalid_entry; + valid = false; break; } if (exists) @@ -165,7 +167,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (ttl_mode != "uniform" && ttl_mode != "pipe") { SWSS_LOG_ERROR("Invalid ttl mode %s\n", ttl_mode.c_str()); - task_status = task_process_status::task_invalid_entry; + valid = false; break; } if (exists) @@ -233,8 +235,13 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } } + if (task_status == task_process_status::task_need_retry) + { + ++it; + continue; + } // create new tunnel if it doesn't exists already - if (task_status == task_process_status::task_success && !exists) + if (valid && !exists) { if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, dscp_to_tc_map_id, tc_to_pg_map_id)) @@ -265,20 +272,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } } - switch (task_status) - { - case task_process_status::task_failed: - case task_process_status::task_success: - case task_process_status::task_invalid_entry: - it = consumer.m_toSync.erase(it); - break; - case task_process_status::task_need_retry: - ++it; - break; - default: - it = consumer.m_toSync.erase(it); - break; - } + it = consumer.m_toSync.erase(it); } } From 47914af4263fdc4234324cad41d3fdd056b15580 Mon Sep 17 00:00:00 2001 From: bingwang Date: Sun, 8 May 2022 19:27:17 -0700 Subject: [PATCH 5/5] Fix bug in remove tunnel term Signed-off-by: bingwang --- orchagent/qosorch.cpp | 2 +- orchagent/tunneldecaporch.cpp | 30 +++++++++++++++++++++++++++--- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index b5c79fae15..f5dd9fe77d 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -251,7 +251,7 @@ sai_object_id_t DscpToTcMapHandler::addQosItem(const vector &at } SWSS_LOG_DEBUG("created QosMap object:%" PRIx64, sai_object); - applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, sai_object); + //applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, sai_object); return sai_object; } diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 4897836a04..ac1119d663 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -97,7 +97,23 @@ void TunnelDecapOrch::doTask(Consumer& consumer) 
                     }
                     if (exists)
                     {
-                        setIpAttribute(key, ip_addresses, tunnelTable.find(key)->second.tunnel_id);
+                        bool is_p2p_tunnel = false;
+                        for (auto term : tunnelTable[key].tunnel_term_info)
+                        {
+                            if (term.term_type == TUNNEL_TERM_TYPE_P2P)
+                            {
+                                is_p2p_tunnel = true;
+                                break;
+                            }
+                        }
+                        if (is_p2p_tunnel)
+                        {
+                            SWSS_LOG_ERROR("cannot modify dst ip for existing tunnel with P2P terminator");
+                        }
+                        else
+                        {
+                            setIpAttribute(key, ip_addresses, tunnelTable.find(key)->second.tunnel_id);
+                        }
                     }
                 }
                 else if (fvField(i) == "src_ip")
@@ -557,7 +573,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres
             // pop the last element for the next loop
             tunnel_table_entry_attrs.pop_back();
 
-            SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", dst_ip.c_str());
+            SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", key.c_str());
         }
     }
@@ -761,7 +777,15 @@ bool TunnelDecapOrch::removeDecapTunnel(string key)
     for (auto it = tunnel_info->tunnel_term_info.begin(); it != tunnel_info->tunnel_term_info.end(); ++it)
     {
         TunnelTermEntry tunnel_entry_info = *it;
-        string term_key = tunnel_entry_info.src_ip + '-' + tunnel_entry_info.dst_ip;
+        string term_key;
+        if (swss::IpAddress(tunnel_entry_info.src_ip).isZero())
+        {
+            term_key = tunnel_entry_info.dst_ip;
+        }
+        else
+        {
+            term_key = tunnel_entry_info.src_ip + '-' + tunnel_entry_info.dst_ip;
+        }
         if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, term_key))
         {
             return false;
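
Note on the term-key convention assumed by the removeDecapTunnel() change above: terminator entries created without a source IP (P2MP) are keyed by the destination IP alone, while P2P entries are keyed as "src_ip-dst_ip". The following self-contained C++ sketch illustrates only that key rule under those assumptions; the TermEntrySketch struct and buildTermKey() helper are illustrative names and are not part of this patch.

// Illustrative sketch only: shows the tunnel term key convention the fix relies on.
#include <iostream>
#include <string>

struct TermEntrySketch
{
    std::string src_ip;   // empty or "0.0.0.0" for P2MP terminators
    std::string dst_ip;
};

static std::string buildTermKey(const TermEntrySketch& e)
{
    // A zero/empty source IP means the entry was created as P2MP,
    // so only the destination IP forms the key.
    if (e.src_ip.empty() || e.src_ip == "0.0.0.0")
    {
        return e.dst_ip;
    }
    return e.src_ip + '-' + e.dst_ip;
}

int main()
{
    std::cout << buildTermKey({"", "10.1.0.32"}) << std::endl;          // prints "10.1.0.32"
    std::cout << buildTermKey({"10.1.0.33", "10.1.0.32"}) << std::endl; // prints "10.1.0.33-10.1.0.32"
    return 0;
}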