diff --git a/orchagent/mirrororch.cpp b/orchagent/mirrororch.cpp
index 37c2ef73df..0a73030f40 100644
--- a/orchagent/mirrororch.cpp
+++ b/orchagent/mirrororch.cpp
@@ -39,6 +39,11 @@
 #define MIRROR_SESSION_DSCP_MIN 0
 #define MIRROR_SESSION_DSCP_MAX 63
 
+// 15 is a typical value, but if the vendor's SAI does not supply the maximum value,
+// allow nearly all 8-bit values, effectively cancelling validation by orchagent.
+#define MIRROR_SESSION_DEFAULT_NUM_TC 255
+
+extern sai_switch_api_t *sai_switch_api;
 extern sai_mirror_api_t *sai_mirror_api;
 extern sai_port_api_t *sai_port_api;
 
@@ -80,9 +85,26 @@ MirrorOrch::MirrorOrch(TableConnector stateDbConnector, TableConnector confDbCon
     m_policerOrch(policerOrch),
     m_mirrorTable(stateDbConnector.first, stateDbConnector.second)
 {
+    sai_status_t status;
+    sai_attribute_t attr;
+
     m_portsOrch->attach(this);
     m_neighOrch->attach(this);
     m_fdbOrch->attach(this);
+
+    // Retrieve the number of traffic classes; valid queue values are 0 to m_maxNumTC - 1
+    attr.id = SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES;
+    status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr);
+    if (status != SAI_STATUS_SUCCESS)
+    {
+        SWSS_LOG_WARN("Failed to get switch attribute number of traffic classes. "
+                      "Using default value. rv:%d", status);
+        m_maxNumTC = MIRROR_SESSION_DEFAULT_NUM_TC;
+    }
+    else
+    {
+        m_maxNumTC = attr.value.u8;
+    }
 }
 
 bool MirrorOrch::bake()
@@ -373,6 +395,11 @@ task_process_status MirrorOrch::createEntry(const string& key, const vector<Fiel
             else if (fvField(i) == MIRROR_SESSION_QUEUE)
             {
                 entry.queue = to_uint<uint8_t>(fvValue(i));
+                if (entry.queue >= m_maxNumTC)
+                {
+                    SWSS_LOG_ERROR("Failed to get valid queue %s", fvValue(i).c_str());
+                    return task_process_status::task_invalid_entry;
+                }
             }
             else if (fvField(i) == MIRROR_SESSION_POLICER)
             {
diff --git a/orchagent/mirrororch.h b/orchagent/mirrororch.h
index 1e0b19c1e6..b31d58bff3 100644
--- a/orchagent/mirrororch.h
+++ b/orchagent/mirrororch.h
@@ -95,6 +95,8 @@ class MirrorOrch : public Orch, public Observer, public Subject
     NeighOrch *m_neighOrch;
     FdbOrch *m_fdbOrch;
     PolicerOrch *m_policerOrch;
+    // Maximum number of traffic classes; valid queue values are 0 to m_maxNumTC - 1
+    uint8_t m_maxNumTC;
 
     Table m_mirrorTable;
 
diff --git a/tests/test_mirror_port_span.py b/tests/test_mirror_port_span.py
index 1aec6e472c..7c27eff85c 100644
--- a/tests/test_mirror_port_span.py
+++ b/tests/test_mirror_port_span.py
@@ -7,6 +7,54 @@
 @pytest.mark.usefixtures('dvs_mirror_manager')
 @pytest.mark.usefixtures('dvs_policer_manager')
 class TestMirror(object):
+
+    def check_syslog(self, dvs, marker, log, expected_cnt):
+        (ec, out) = dvs.runcmd(['sh', '-c', "awk '/%s/,ENDFILE {print;}' /var/log/syslog | grep '%s' | wc -l" % (marker, log)])
+        assert out.strip() == str(expected_cnt)
+
+
+    def test_PortMirrorQueue(self, dvs, testlog):
+        """
+        This test covers valid and invalid values of the queue parameter. All sessions have a source and a destination port.
+        Operation flow:
+        1. Create a mirror session with queue 0; verify the session becomes active and no error is written to the log.
+        2. Create a mirror session with the maximum valid queue value; verify the session becomes active and no error is written to the log.
+        3. Create a mirror session with the maximum valid queue value + 1; verify the session is not created and an error is written to the log.
+        Due to lag in table operations, verify_no_mirror is necessary at the end of each step to ensure cleanup before the next step.
+        Note that since orchagent caches the maximum valid value during initialization, this test cannot simulate a value from SAI, e.g.
+        by calling setReadOnlyAttr, because orchagent has already completed initialization and would never read the simulated value.
+        Therefore the default value, MIRROR_SESSION_DEFAULT_NUM_TC (defined in mirrororch.cpp as 255), must be used.
+        """
+
+        session = "TEST_SESSION"
+        dst_port = "Ethernet16"
+        src_ports = "Ethernet12"
+
+        # Sub Test 1
+        marker = dvs.add_log_marker()
+        self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="BOTH", queue="0")
+        self.dvs_mirror.verify_session_status(session)
+        self.dvs_mirror.remove_mirror_session(session)
+        self.dvs_mirror.verify_no_mirror()
+        self.check_syslog(dvs, marker, "Failed to get valid queue 0", 0)
+
+        # Sub Test 2
+        marker = dvs.add_log_marker()
+        self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="RX", queue="254")
+        self.dvs_mirror.verify_session_status(session)
+        self.dvs_mirror.remove_mirror_session(session)
+        self.dvs_mirror.verify_no_mirror()
+        self.check_syslog(dvs, marker, "Failed to get valid queue 254", 0)
+
+        # Sub Test 3
+        marker = dvs.add_log_marker()
+        self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="TX", queue="255")
+        self.dvs_mirror.verify_session_status(session, expected=0)
+        self.dvs_mirror.remove_mirror_session(session)
+        self.dvs_mirror.verify_no_mirror()
+        self.check_syslog(dvs, marker, "Failed to get valid queue 255", 1)
+
+
     def test_PortMirrorAddRemove(self, dvs, testlog):
         """
         This test covers the basic SPAN mirror session creation and removal operations
@@ -471,7 +519,6 @@ def test_PortLAGMirrorUpdateLAG(self, dvs, testlog):
         self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 0)
 
 
-
 # Add Dummy always-pass test at end as workaroud
 # for issue when Flaky fail on final test it invokes module tear-down before retrying
 def test_nonflaky_dummy():
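
For context, here is a minimal sketch (not part of the patch) of the CONFIG_DB write that the dvs_mirror fixture's create_span_session helper is assumed to perform. The helper's real implementation lives in the test fixtures, so the function name and exact field set below are illustrative of the SONiC MIRROR_SESSION schema, not authoritative:

from swsscommon import swsscommon

def create_span_session_sketch(session, dst_port, src_ports, direction, queue):
    # MirrorOrch consumes the MIRROR_SESSION table from CONFIG_DB.
    cfg_db = swsscommon.DBConnector("CONFIG_DB", 0)
    tbl = swsscommon.Table(cfg_db, "MIRROR_SESSION")
    fvs = swsscommon.FieldValuePairs([
        ("type", "SPAN"),
        ("dst_port", dst_port),    # e.g. "Ethernet16"
        ("src_port", src_ports),   # e.g. "Ethernet12"
        ("direction", direction),  # "RX", "TX", or "BOTH"
        ("queue", queue),          # value MirrorOrch::createEntry range-checks against m_maxNumTC
    ])
    tbl.set(session, fvs)

With the patch above, a queue value of 255 (>= MIRROR_SESSION_DEFAULT_NUM_TC when SAI supplies no maximum) makes createEntry return task_invalid_entry, so the session is rejected before anything is programmed to ASIC_DB.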