From dc477fb0c4ae61bd4a92576361aa98bb59ddba4a Mon Sep 17 00:00:00 2001 From: "anton.novikau" Date: Wed, 3 Aug 2022 22:40:40 +0300 Subject: [PATCH 01/10] [swss/cfgmgr] teammgr configure lacp rate (#2121) * add lacp_rate config * add lacp_rate to teammgr addLag params * docs for portchannel db config * add test Co-authored-by: Anton Co-authored-by: Myron Sosyak --- cfgmgr/teammgr.cpp | 20 ++++++++++++++++---- cfgmgr/teammgr.h | 2 +- tests/conftest.py | 7 ++++--- tests/dvslib/dvs_lag.py | 18 +++++++++++++++--- tests/test_portchannel.py | 26 +++++++++++++++++++++++++- 5 files changed, 61 insertions(+), 12 deletions(-) diff --git a/cfgmgr/teammgr.cpp b/cfgmgr/teammgr.cpp index 31f911741c..273674fbee 100644 --- a/cfgmgr/teammgr.cpp +++ b/cfgmgr/teammgr.cpp @@ -252,6 +252,7 @@ void TeamMgr::doLagTask(Consumer &consumer) { int min_links = 0; bool fallback = false; + bool fast_rate = false; string admin_status = DEFAULT_ADMIN_STATUS_STR; string mtu = DEFAULT_MTU_STR; string learn_mode; @@ -293,12 +294,18 @@ void TeamMgr::doLagTask(Consumer &consumer) { tpid = fvValue(i); SWSS_LOG_INFO("Get TPID %s", tpid.c_str()); - } + } + else if (fvField(i) == "fast_rate") + { + fast_rate = fvValue(i) == "true"; + SWSS_LOG_INFO("Get fast_rate `%s`", + fast_rate ? 
"true" : "false"); + } } if (m_lagList.find(alias) == m_lagList.end()) { - if (addLag(alias, min_links, fallback) == task_need_retry) + if (addLag(alias, min_links, fallback, fast_rate) == task_need_retry) { it++; continue; @@ -553,7 +560,7 @@ bool TeamMgr::setLagLearnMode(const string &alias, const string &learn_mode) return true; } -task_process_status TeamMgr::addLag(const string &alias, int min_links, bool fallback) +task_process_status TeamMgr::addLag(const string &alias, int min_links, bool fallback, bool fast_rate) { SWSS_LOG_ENTER(); @@ -610,6 +617,11 @@ task_process_status TeamMgr::addLag(const string &alias, int min_links, bool fal conf << ",\"fallback\":true"; } + if (fast_rate) + { + conf << ",\"fast_rate\":true"; + } + conf << "}}'"; SWSS_LOG_INFO("Port channel %s teamd configuration: %s", @@ -652,7 +664,7 @@ bool TeamMgr::removeLag(const string &alias) } // Port-channel names are in the pattern of "PortChannel####" -// +// // The LACP key could be generated in 3 ways based on the value in config DB: // 1. "auto" - LACP key is extracted from the port-channel name and is set to be the number at the end of the port-channel name // We are adding 1 at the beginning to avoid LACP key collisions between similar LACP keys e.g. PortChannel10 and PortChannel010. 
diff --git a/cfgmgr/teammgr.h b/cfgmgr/teammgr.h index db87fdd1f4..3c98f87dc5 100644 --- a/cfgmgr/teammgr.h +++ b/cfgmgr/teammgr.h @@ -41,7 +41,7 @@ class TeamMgr : public Orch void doLagMemberTask(Consumer &consumer); void doPortUpdateTask(Consumer &consumer); - task_process_status addLag(const std::string &alias, int min_links, bool fall_back); + task_process_status addLag(const std::string &alias, int min_links, bool fall_back, bool fast_rate); bool removeLag(const std::string &alias); task_process_status addLagMember(const std::string &lag, const std::string &member); bool removeLagMember(const std::string &lag, const std::string &member); diff --git a/tests/conftest.py b/tests/conftest.py index 6e6939d41c..437190a689 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -412,7 +412,7 @@ def create_servers(self): for i in range(NUM_PORTS): server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i) self.servers.append(server) - + def reset_dbs(self): # DB wrappers are declared here, lazy-loaded in the tests self.app_db = None @@ -1853,7 +1853,8 @@ def dvs_route(request, dvs) -> DVSRoute: @pytest.yield_fixture(scope="class") def dvs_lag_manager(request, dvs): request.cls.dvs_lag = dvs_lag.DVSLag(dvs.get_asic_db(), - dvs.get_config_db()) + dvs.get_config_db(), + dvs) @pytest.yield_fixture(scope="class") @@ -1868,7 +1869,7 @@ def dvs_vlan_manager(request, dvs): def dvs_port_manager(request, dvs): request.cls.dvs_port = dvs_port.DVSPort(dvs.get_asic_db(), dvs.get_config_db()) - + @pytest.yield_fixture(scope="class") def dvs_mirror_manager(request, dvs): request.cls.dvs_mirror = dvs_mirror.DVSMirror(dvs.get_asic_db(), diff --git a/tests/dvslib/dvs_lag.py b/tests/dvslib/dvs_lag.py index 06dd0c4217..3e8c27f11d 100644 --- a/tests/dvslib/dvs_lag.py +++ b/tests/dvslib/dvs_lag.py @@ -1,11 +1,14 @@ +import json + class DVSLag(object): - def __init__(self, adb, cdb): + def __init__(self, adb, cdb, dvs): self.asic_db = adb self.config_db = cdb + self.dvs = dvs - def 
create_port_channel(self, lag_id, admin_status="up", mtu="1500"): + def create_port_channel(self, lag_id, admin_status="up", mtu="1500", fast_rate=False): lag = "PortChannel{}".format(lag_id) - lag_entry = {"admin_status": admin_status, "mtu": mtu} + lag_entry = {"admin_status": admin_status, "mtu": mtu, "fast_rate": str(fast_rate).lower()} self.config_db.create_entry("PORTCHANNEL", lag, lag_entry) def remove_port_channel(self, lag_id): @@ -27,3 +30,12 @@ def get_and_verify_port_channel_members(self, expected_num): def get_and_verify_port_channel(self, expected_num): return self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", expected_num) + def dump_portchannel(self, lag_id): + lag = "PortChannel{}".format(lag_id) + output = self.dvs.runcmd("teamdctl {} state dump".format(lag))[1] + port_state_dump = json.loads(output) + return port_state_dump + + def get_and_verify_port_channel_fast_rate(self, lag_id, fast_rate): + assert self.dump_portchannel(lag_id)["runner"]["fast_rate"] == fast_rate + diff --git a/tests/test_portchannel.py b/tests/test_portchannel.py index 3e24b6a340..0a922e6936 100644 --- a/tests/test_portchannel.py +++ b/tests/test_portchannel.py @@ -1,3 +1,4 @@ +import pytest import time import re import json @@ -6,6 +7,7 @@ from swsscommon import swsscommon +@pytest.mark.usefixtures('dvs_lag_manager') class TestPortchannel(object): def test_Portchannel(self, dvs, testlog): @@ -89,6 +91,28 @@ def test_Portchannel(self, dvs, testlog): lagms = lagmtbl.getKeys() assert len(lagms) == 0 + @pytest.mark.parametrize("fast_rate", [False, True]) + def test_Portchannel_fast_rate(self, dvs, testlog, fast_rate): + po_id = "0003" + po_member = "Ethernet16" + + # Create PortChannel + self.dvs_lag.create_port_channel(po_id, fast_rate=fast_rate) + self.dvs_lag.get_and_verify_port_channel(1) + + # Add member to PortChannel + self.dvs_lag.create_port_channel_member(po_id, po_member) + self.dvs_lag.get_and_verify_port_channel_members(1) + + # test fast rate 
configuration + self.dvs_lag.get_and_verify_port_channel_fast_rate(po_id, fast_rate) + + # remove PortChannel + self.dvs_lag.create_port_channel_member(po_id, po_member) + self.dvs_lag.remove_port_channel(po_id) + self.dvs_lag.get_and_verify_port_channel(0) + + def test_Portchannel_lacpkey(self, dvs, testlog): portchannelNamesAuto = [("PortChannel001", "Ethernet0", 1001), ("PortChannel002", "Ethernet4", 1002), @@ -108,7 +132,7 @@ def test_Portchannel_lacpkey(self, dvs, testlog): for portchannel in portchannelNamesAuto: tbl.set(portchannel[0], fvs) - + fvs_no_lacp_key = swsscommon.FieldValuePairs( [("admin_status", "up"), ("mtu", "9100"), ("oper_status", "up")]) tbl.set(portchannelNames[0][0], fvs_no_lacp_key) From 2489ad57bea7ec04c911ee70c793746d3517b9dd Mon Sep 17 00:00:00 2001 From: mint570 <70396898+mint570@users.noreply.github.com> Date: Fri, 5 Aug 2022 09:38:50 -0700 Subject: [PATCH 02/10] Improve pytest speend by grouping 20 tests together. (#2390) *Improve pytest speed by grouping 20 tests together --- .../test-docker-sonic-vs-template.yml | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index ae2df1528d..2dc8e3c567 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -71,16 +71,32 @@ jobs: if [ '${{ parameters.archive_gcov }}' == True ]; then all_tests=$(ls test_*.py) all_tests="${all_tests} p4rt" + test_set=() + # Run 20 tests as a set. for test in ${all_tests}; do - test_name=$(echo "${test}" | cut -d "." -f 1) - sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) ${test} + test_set+=("${test}") + if [ ${#test_set[@]} -ge 20 ]; then + test_name=$(echo "${test_set[0]}" | cut -d "." 
-f 1) + echo "${test_set[*]}" | xargs sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) + container_count=$(docker ps -q -a | wc -l) + if [ ${container_count} -gt 0 ]; then + ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) + docker stop $(docker ps -q -a) + docker rm $(docker ps -q -a) + fi + test_set=() + fi + done + if [ ${#test_set[@]} -gt 0 ]; then + test_name=$(echo "${test_set[0]}" | cut -d "." -f 1) + echo "${test_set[*]}" | xargs sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) container_count=$(docker ps -q -a | wc -l) if [ ${container_count} -gt 0 ]; then ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) docker stop $(docker ps -q -a) docker rm $(docker ps -q -a) fi - done + fi else sudo py.test -v --force-flaky --junitxml=tests_tr.xml --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) fi From 168bd3b32e7a8b484425041d9dbdb8d3dee7c944 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Mon, 8 Aug 2022 08:12:41 -0700 Subject: [PATCH 03/10] [EVPN]Modified tunnel creation logic when creating tunnel in VRF-VNI map creation flow (#2404) Same as PR #2387 - What I did To fix issue sonic-net/sonic-buildimage#11428 Modified the logic of tunnel map creation to create tunnel with tunnel map for vlan-vni map in addition to vrf-vni map when tunnel is created first time in the VRF-VNI map processing flow. Modified the tunnel stats interval to 10 sec Modified the logic to create bridge port for p2mp tunnel only when p2p tunnel is not supported - Why I did it During the configuration phase when VRF-VNI map arrives before VLAN-VNI map, the tunnel is created without a tunnel map for vlan-vni membership. 
This is problematic when VLAN to VNI map arrives later, tunnel map entry cannot be created since the tunnel map doesn't exist and its a create only attribute in SAI. - How I verified it Modified UT to add VRF-VNI map first and VLAN-VLAN map later --- orchagent/vxlanorch.cpp | 140 +++++++++++-------------------- orchagent/vxlanorch.h | 3 +- tests/evpn_tunnel.py | 75 ++++++++--------- tests/test_evpn_l3_vxlan_p2mp.py | 22 ++--- tests/test_evpn_tunnel_p2mp.py | 8 +- 5 files changed, 100 insertions(+), 148 deletions(-) diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index 7850727bd2..1995675fdd 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -494,67 +494,6 @@ VxlanTunnel::~VxlanTunnel() src_creation_, false); } -bool VxlanTunnel::createTunnel(MAP_T encap, MAP_T decap, uint8_t encap_ttl) -{ - try - { - VxlanTunnelOrch* tunnel_orch = gDirectory.get(); - sai_ip_address_t ips, ipd, *ip=nullptr; - uint8_t mapper_list = 0; - swss::copy(ips, src_ip_); - - // Only a single mapper type is created - - if (decap == MAP_T::VNI_TO_BRIDGE) - { - TUNNELMAP_SET_BRIDGE(mapper_list); - } - else if (decap == MAP_T::VNI_TO_VLAN_ID) - { - TUNNELMAP_SET_VLAN(mapper_list); - } - else - { - TUNNELMAP_SET_VRF(mapper_list); - } - - createMapperHw(mapper_list, (encap == MAP_T::MAP_TO_INVALID) ? 
- TUNNEL_MAP_USE_DECAP_ONLY: TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); - - if (encap != MAP_T::MAP_TO_INVALID) - { - ip = &ips; - } - - ids_.tunnel_id = create_tunnel(&ids_, ip, NULL, gUnderlayIfId, false, encap_ttl); - - if (ids_.tunnel_id != SAI_NULL_OBJECT_ID) - { - tunnel_orch->addTunnelToFlexCounter(ids_.tunnel_id, tunnel_name_); - } - - ip = nullptr; - if (!dst_ip_.isZero()) - { - swss::copy(ipd, dst_ip_); - ip = &ipd; - } - - ids_.tunnel_term_id = create_tunnel_termination(ids_.tunnel_id, ips, ip, gVirtualRouterId); - active_ = true; - tunnel_map_ = { encap, decap }; - } - catch (const std::runtime_error& error) - { - SWSS_LOG_ERROR("Error creating tunnel %s: %s", tunnel_name_.c_str(), error.what()); - // FIXME: add code to remove already created objects - return false; - } - - SWSS_LOG_NOTICE("Vxlan tunnel '%s' was created", tunnel_name_.c_str()); - return true; -} - sai_object_id_t VxlanTunnel::addEncapMapperEntry(sai_object_id_t obj, uint32_t vni, tunnel_map_type_t type) { const auto encap_id = getEncapMapId(type); @@ -1924,20 +1863,18 @@ bool VxlanTunnel::isTunnelReferenced() Port tunnelPort; bool dip_tunnels_used = tunnel_orch->isDipTunnelsSupported(); - ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); - if (!ret) - { - SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); - return false; - } - - if (dip_tunnels_used) { return (getDipTunnelCnt() != 0); } else { + ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); + if (!ret) + { + SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); + return false; + } if (tunnelPort.m_fdb_count != 0) { return true; @@ -2013,19 +1950,21 @@ bool VxlanTunnelMapOrch::addOperation(const Request& request) if (!tunnel_obj->isActive()) { //@Todo, currently only decap mapper is allowed - //tunnel_obj->createTunnel(MAP_T::MAP_TO_INVALID, MAP_T::VNI_TO_VLAN_ID); uint8_t mapper_list = 0; TUNNELMAP_SET_VLAN(mapper_list); TUNNELMAP_SET_VRF(mapper_list); 
tunnel_obj->createTunnelHw(mapper_list,TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); - Port tunPort; - auto src_vtep = tunnel_obj->getSrcIP().to_string(); - if (!tunnel_orch->getTunnelPort(src_vtep, tunPort, true)) + if (!tunnel_orch->isDipTunnelsSupported()) { - auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); - gPortsOrch->addTunnel(port_tunnel_name, tunnel_obj->getTunnelId(), false); - gPortsOrch->getPort(port_tunnel_name,tunPort); - gPortsOrch->addBridgePort(tunPort); + Port tunPort; + auto src_vtep = tunnel_obj->getSrcIP().to_string(); + if (!tunnel_orch->getTunnelPort(src_vtep, tunPort, true)) + { + auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); + gPortsOrch->addTunnel(port_tunnel_name, tunnel_obj->getTunnelId(), false); + gPortsOrch->getPort(port_tunnel_name,tunPort); + gPortsOrch->addBridgePort(tunPort); + } } } @@ -2117,26 +2056,27 @@ bool VxlanTunnelMapOrch::delOperation(const Request& request) auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); bool ret; - ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); // If there are Dynamic DIP Tunnels referring to this SIP Tunnel // then mark it as pending for delete. 
if (!tunnel_obj->isTunnelReferenced()) { - if (!ret) + if (!tunnel_orch->isDipTunnelsSupported()) { - SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); - return true; + ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); + if (!ret) + { + SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); + return true; + } + ret = gPortsOrch->removeBridgePort(tunnelPort); + if (!ret) + { + SWSS_LOG_ERROR("Remove Bridge port failed for source vtep = %s fdbcount = %d", + port_tunnel_name.c_str(), tunnelPort.m_fdb_count); + return true; + } + gPortsOrch->removeTunnel(tunnelPort); } - ret = gPortsOrch->removeBridgePort(tunnelPort); - if (!ret) - { - SWSS_LOG_ERROR("Remove Bridge port failed for source vtep = %s fdbcount = %d", - port_tunnel_name.c_str(), tunnelPort.m_fdb_count); - return true; - } - - gPortsOrch->removeTunnel(tunnelPort); - uint8_t mapper_list=0; TUNNELMAP_SET_VLAN(mapper_list); TUNNELMAP_SET_VRF(mapper_list); @@ -2152,6 +2092,7 @@ bool VxlanTunnelMapOrch::delOperation(const Request& request) } else { + gPortsOrch->getPort(port_tunnel_name, tunnelPort); SWSS_LOG_WARN("Postponing the SIP Tunnel HW deletion Remote reference count = %d", gPortsOrch->getBridgePortReferenceCount(tunnelPort)); } @@ -2218,7 +2159,22 @@ bool VxlanVrfMapOrch::addOperation(const Request& request) { if (!tunnel_obj->isActive()) { - tunnel_obj->createTunnel(MAP_T::VRID_TO_VNI, MAP_T::VNI_TO_VRID); + uint8_t mapper_list = 0; + TUNNELMAP_SET_VLAN(mapper_list); + TUNNELMAP_SET_VRF(mapper_list); + tunnel_obj->createTunnelHw(mapper_list,TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); + if (!tunnel_orch->isDipTunnelsSupported()) + { + Port tunPort; + auto src_vtep = tunnel_obj->getSrcIP().to_string(); + if (!tunnel_orch->getTunnelPort(src_vtep, tunPort, true)) + { + auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); + gPortsOrch->addTunnel(port_tunnel_name, tunnel_obj->getTunnelId(), false); + 
gPortsOrch->getPort(port_tunnel_name,tunPort); + gPortsOrch->addBridgePort(tunPort); + } + } } vrf_id = vrf_orch->getVRFid(vrf_name); } diff --git a/orchagent/vxlanorch.h b/orchagent/vxlanorch.h index 0b56e76faa..9529a86ce7 100644 --- a/orchagent/vxlanorch.h +++ b/orchagent/vxlanorch.h @@ -37,7 +37,7 @@ typedef enum #define IS_TUNNELMAP_SET_BRIDGE(x) ((x)& (1< 0, "Bridgeport entry not created" - assert len(ret) == 1, "More than 1 bridgeport entry created" - self.bridgeport_map[src_ip] = ret[0] + if not ignore_bp: + expected_bridgeport_attributes = { + 'SAI_BRIDGE_PORT_ATTR_TYPE': 'SAI_BRIDGE_PORT_TYPE_TUNNEL', + 'SAI_BRIDGE_PORT_ATTR_TUNNEL_ID': tunnel_id, + 'SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE': 'SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE', + 'SAI_BRIDGE_PORT_ATTR_ADMIN_STATE': 'true', + } + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_BRIDGE_PORT, expected_bridgeport_attributes) + assert len(ret) > 0, "Bridgeport entry not created" + assert len(ret) == 1, "More than 1 bridgeport entry created" + self.bridgeport_map[src_ip] = ret[0] + self.tunnel_map_ids.update(tunnel_map_id) self.tunnel_ids.add(tunnel_id) self.tunnel_term_ids.add(tunnel_term_id) @@ -797,37 +798,33 @@ def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): def check_vxlan_tunnel_vrf_map_entry(self, dvs, tunnel_name, vrf_name, vni_id): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - if (self.tunnel_map_map.get(tunnel_name) is None): - tunnel_map_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - else: - tunnel_map_id = self.tunnel_map_map[tunnel_name] - tunnel_map_entry_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 3) # check that the vxlan tunnel termination are there assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 3), "The TUNNEL_MAP_ENTRY is created too early" - 
self.helper.check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1], + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, { 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[3], 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vrf_name].get('ing'), 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id, } ) - self.tunnel_map_vrf_entry_ids.update(tunnel_map_entry_id[1]) + assert len(ret) == 1, "Invalid number of tunnel map entries for SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI" + + self.tunnel_map_vrf_entry_ids.update(ret[0]) - self.helper.check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[2], + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, { 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[2], 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id, 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vrf_name].get('egr'), } ) - self.tunnel_map_vrf_entry_ids.update(tunnel_map_entry_id[2]) + assert len(ret) == 1, "Invalid number of tunnel map entries for SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID" + self.tunnel_map_vrf_entry_ids.update(ret[0]) self.tunnel_map_entry_ids.update(tunnel_map_entry_id) def check_vxlan_tunnel_vrf_map_entry_remove(self, dvs, tunnel_name, vrf_name, vni_id): diff --git a/tests/test_evpn_l3_vxlan_p2mp.py b/tests/test_evpn_l3_vxlan_p2mp.py index c704cb3788..1bff4cce1e 100644 --- a/tests/test_evpn_l3_vxlan_p2mp.py +++ b/tests/test_evpn_l3_vxlan_p2mp.py @@ -37,12 +37,12 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) - print ("\tCreate Vlan-VNI map and VRF-VNI map") - vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 
'Vlan100') - vxlan_obj.create_vrf(dvs, "Vrf-RED") vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + print ("\tCreate Vlan-VNI map and VRF-VNI map") + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + print ("\tTesting VRF-VNI map in APP DB") vlanlist = ['100'] vnilist = ['1000'] @@ -67,7 +67,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) print ("\tTesting Tunnel Vlan VNI Map Entry") vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) @@ -88,7 +88,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) vxlan_obj.remove_vlan(dvs, "100") @@ -141,7 +141,7 @@ def test_prefix_route_create_remote_endpoint(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) print ("\tTesting Tunnel Vlan Map Entry") vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) @@ -179,7 +179,7 @@ def test_prefix_route_create_remote_endpoint(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + 
vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) vxlan_obj.remove_vrf(dvs, "Vrf-RED") vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") vxlan_obj.remove_vlan(dvs, "100") @@ -233,7 +233,7 @@ def test_remote_ipv4_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) print ("\tTesting Tunnel Vlan Map Entry") vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) @@ -380,7 +380,7 @@ def test_remote_ipv4_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) vxlan_obj.remove_vrf(dvs, "Vrf-RED") vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") vxlan_obj.remove_vlan(dvs, "100") @@ -436,7 +436,7 @@ def test_remote_ipv6_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) print ("\tTesting Tunnel Vlan Map Entry") vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) @@ -584,7 +584,7 @@ def test_remote_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', 
ignore_bp=False) vxlan_obj.remove_vrf(dvs, "Vrf-RED") vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") vxlan_obj.remove_vlan(dvs, "100") diff --git a/tests/test_evpn_tunnel_p2mp.py b/tests/test_evpn_tunnel_p2mp.py index f2b3e62cea..22f12f0beb 100644 --- a/tests/test_evpn_tunnel_p2mp.py +++ b/tests/test_evpn_tunnel_p2mp.py @@ -30,7 +30,7 @@ def test_p2mp_tunnel(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] print("Testing SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) print("Testing Tunnel Map Entry") vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) @@ -43,7 +43,7 @@ def test_p2mp_tunnel(self, dvs, testlog): print("Testing SIP Tunnel Deletion") vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) # Test 2 - Vlan extension Tests def test_vlan_extension(self, dvs, testlog): @@ -62,7 +62,7 @@ def test_vlan_extension(self, dvs, testlog): vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_2, '1002', 'Vlan102') - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -121,4 +121,4 @@ def test_vlan_extension(self, dvs, testlog): print("Testing SIP Tunnel Deletion") vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, 
'6.6.6.6', ignore_bp=False) From 1e1438e46ea8e85cffce283935c97c45bfde8dc2 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Tue, 9 Aug 2022 00:57:04 +0800 Subject: [PATCH 04/10] [portsorch] Expose supported FEC modes to STABE_DB and check whether FEC mode is supported before setting it (#2400) * Revert "Revert "[portsorch] Expose supported FEC modes to STABE_DB and check whether FEC mode is supported before setting it (#2333)" (#2396)" This reverts commit 6565b502f01e739b4961f216084ffedf78395640. * Adjust the prototype of setPortFec Signed-off-by: Stephen Sun --- orchagent/p4orch/tests/fake_portorch.cpp | 2 +- orchagent/portsorch.cpp | 120 ++++++++++-- orchagent/portsorch.h | 8 +- tests/mock_tests/portsorch_ut.cpp | 227 +++++++++++++++++++++++ 4 files changed, 344 insertions(+), 13 deletions(-) diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index 1cd3e20ae3..0762dd0357 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -518,7 +518,7 @@ bool PortsOrch::getPortPvid(Port &port, sai_uint32_t &pvid) return true; } -bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) +bool PortsOrch::setPortFec(Port &port, std::string &mode) { return true; } diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 3a0b4e75f0..26219d4d35 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -79,6 +79,13 @@ static map fec_mode_map = { "fc", SAI_PORT_FEC_MODE_FC } }; +static map fec_mode_reverse_map = +{ + { SAI_PORT_FEC_MODE_NONE, "none" }, + { SAI_PORT_FEC_MODE_RS, "rs" }, + { SAI_PORT_FEC_MODE_FC, "fc" } +}; + static map pfc_asym_map = { { "on", SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE }, @@ -1214,19 +1221,31 @@ bool PortsOrch::setPortTpid(sai_object_id_t id, sai_uint16_t tpid) return true; } - -bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) +bool PortsOrch::setPortFec(Port &port, 
string &mode) { SWSS_LOG_ENTER(); + auto searchRef = m_portSupportedFecModes.find(port.m_port_id); + if (searchRef != m_portSupportedFecModes.end()) + { + auto &supportedFecModes = searchRef->second; + if (!supportedFecModes.empty() && (supportedFecModes.find(mode) == supportedFecModes.end())) + { + SWSS_LOG_ERROR("Unsupported mode %s on port %s", mode.c_str(), port.m_alias.c_str()); + // We return true becase the caller will keep the item in m_toSync and retry it later if we return false + // As the FEC mode is not supported it doesn't make sense to retry. + return true; + } + } + sai_attribute_t attr; attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.s32 = mode; + attr.value.s32 = port.m_fec_mode; sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); + SWSS_LOG_ERROR("Failed to set FEC mode %s to port %s", mode.c_str(), port.m_alias.c_str()); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1234,7 +1253,7 @@ bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) } } - SWSS_LOG_INFO("Set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); + SWSS_LOG_NOTICE("Set port %s FEC mode %s", port.m_alias.c_str(), mode.c_str()); setGearboxPortsAttr(port, SAI_PORT_ATTR_FEC_MODE, &mode); @@ -2011,6 +2030,7 @@ void PortsOrch::initPortSupportedSpeeds(const std::string& alias, sai_object_id_ m_portStateTable.set(alias, v); } + void PortsOrch::initPortCapAutoNeg(Port &port) { sai_status_t status; @@ -2040,6 +2060,87 @@ void PortsOrch::initPortCapLinkTraining(Port &port) SWSS_LOG_WARN("Unable to get %s LT support capability", port.m_alias.c_str()); } +void PortsOrch::getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes) +{ + sai_attribute_t attr; + sai_status_t status; + vector 
fecModes(fec_mode_reverse_map.size()); + + attr.id = SAI_PORT_ATTR_SUPPORTED_FEC_MODE; + attr.value.s32list.count = static_cast(fecModes.size()); + attr.value.s32list.list = fecModes.data(); + + status = sai_port_api->get_port_attribute(port_id, 1, &attr); + fecModes.resize(attr.value.s32list.count); + if (status == SAI_STATUS_SUCCESS) + { + if (fecModes.empty()) + { + supported_fecmodes.insert("N/A"); + } + else + { + for(auto fecMode : fecModes) + { + supported_fecmodes.insert(fec_mode_reverse_map[static_cast(fecMode)]); + } + } + } + else + { + if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) || + status == SAI_STATUS_NOT_IMPLEMENTED) + { + // unable to validate FEC mode if attribute is not supported on platform + SWSS_LOG_NOTICE("Unable to validate FEC mode for port %s id=%" PRIx64 " due to unsupported by platform", + alias.c_str(), port_id); + } + else + { + SWSS_LOG_ERROR("Failed to get a list of supported FEC modes for port %s id=%" PRIx64 ". 
Error=%d", + alias.c_str(), port_id, status); + } + + supported_fecmodes.clear(); // return empty + } +} + +void PortsOrch::initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id) +{ + // If port supported speeds map already contains the information, save the SAI call + if (m_portSupportedFecModes.count(port_id)) + { + return; + } + PortSupportedFecModes supported_fec_modes; + getPortSupportedFecModes(alias, port_id, supported_fec_modes); + m_portSupportedFecModes[port_id] = supported_fec_modes; + + if (supported_fec_modes.empty()) + { + // Do not expose "supported_fecs" in case fetching FEC modes is not supported by the vendor + SWSS_LOG_INFO("No supported_fecs exposed to STATE_DB for port %s since fetching supported FEC modes is not supported by the vendor", + alias.c_str()); + return; + } + + vector v; + std::string supported_fec_modes_str; + bool first = true; + for(auto fec : supported_fec_modes) + { + if (first) + first = false; + else + supported_fec_modes_str += ','; + supported_fec_modes_str += fec; + } + + v.emplace_back(std::make_pair("supported_fecs", supported_fec_modes_str)); + m_portStateTable.set(alias, v); +} + /* * If Gearbox is enabled and this is a Gearbox port then set the attributes accordingly. 
*/ @@ -3105,6 +3206,7 @@ void PortsOrch::doPortTask(Consumer &consumer) } initPortSupportedSpeeds(get<0>(it->second), m_portListLaneMap[it->first]); + initPortSupportedFecModes(get<0>(it->second), m_portListLaneMap[it->first]); it++; } @@ -3520,14 +3622,12 @@ void PortsOrch::doPortTask(Consumer &consumer) p.m_fec_mode = fec_mode_map[fec_mode]; p.m_fec_cfg = true; - if (setPortFec(p, p.m_fec_mode)) + if (setPortFec(p, fec_mode)) { m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); it++; continue; } @@ -3537,14 +3637,12 @@ void PortsOrch::doPortTask(Consumer &consumer) /* Port is already down, setting fec mode*/ p.m_fec_mode = fec_mode_map[fec_mode]; p.m_fec_cfg = true; - if (setPortFec(p, p.m_fec_mode)) + if (setPortFec(p, fec_mode)) { m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); it++; continue; } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 42e17d7fb9..28e576e906 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -27,6 +27,7 @@ #define PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP "PG_DROP_STAT_COUNTER" typedef std::vector PortSupportedSpeeds; +typedef std::set PortSupportedFecModes; static const map oper_status_strings = { @@ -209,6 +210,8 @@ class PortsOrch : public Orch, public Subject unique_ptr m_gbcounterTable; std::map m_portSupportedSpeeds; + // Supported FEC modes on the system side. 
+ std::map m_portSupportedFecModes; bool m_initDone = false; Port m_cpuPort; @@ -311,7 +314,7 @@ class PortsOrch : public Orch, public Subject bool setPortTpid(sai_object_id_t id, sai_uint16_t tpid); bool setPortPvid (Port &port, sai_uint32_t pvid); bool getPortPvid(Port &port, sai_uint32_t &pvid); - bool setPortFec(Port &port, sai_port_fec_mode_t mode); + bool setPortFec(Port &port, std::string &mode); bool setPortPfcAsym(Port &port, string pfc_asym); bool getDestPortId(sai_object_id_t src_port_id, dest_port_type_t port_type, sai_object_id_t &des_port_id); @@ -320,6 +323,9 @@ class PortsOrch : public Orch, public Subject bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); void initPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id); + // Get supported FEC modes on system side + void getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes); + void initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id); task_process_status setPortSpeed(Port &port, sai_uint32_t speed); bool getPortSpeed(sai_object_id_t id, sai_uint32_t &speed); bool setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value); diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 78c633d4a1..93e73d9041 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -20,6 +20,70 @@ namespace portsorch_test using namespace std; + sai_port_api_t ut_sai_port_api; + sai_port_api_t *pold_sai_port_api; + + bool not_support_fetching_fec; + vector mock_port_fec_modes = {SAI_PORT_FEC_MODE_RS, SAI_PORT_FEC_MODE_FC}; + + sai_status_t _ut_stub_sai_get_port_attribute( + _In_ sai_object_id_t port_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + sai_status_t status; 
+ if (attr_count == 1 && attr_list[0].id == SAI_PORT_ATTR_SUPPORTED_FEC_MODE) + { + if (not_support_fetching_fec) + { + status = SAI_STATUS_NOT_IMPLEMENTED; + } + else + { + uint32_t i; + for (i = 0; i < attr_list[0].value.s32list.count && i < mock_port_fec_modes.size(); i++) + { + attr_list[0].value.s32list.list[i] = mock_port_fec_modes[i]; + } + attr_list[0].value.s32list.count = i; + status = SAI_STATUS_SUCCESS; + } + } + else + { + status = pold_sai_port_api->get_port_attribute(port_id, attr_count, attr_list); + } + return status; + } + + uint32_t _sai_set_port_fec_count; + int32_t _sai_port_fec_mode; + sai_status_t _ut_stub_sai_set_port_attribute( + _In_ sai_object_id_t port_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_PORT_ATTR_FEC_MODE) + { + _sai_set_port_fec_count++; + _sai_port_fec_mode = attr[0].value.s32; + } + return pold_sai_port_api->set_port_attribute(port_id, attr); + } + + void _hook_sai_port_api() + { + ut_sai_port_api = *sai_port_api; + pold_sai_port_api = sai_port_api; + ut_sai_port_api.get_port_attribute = _ut_stub_sai_get_port_attribute; + ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; + sai_port_api = &ut_sai_port_api; + } + + void _unhook_sai_port_api() + { + sai_port_api = pold_sai_port_api; + } + struct PortsOrchTest : public ::testing::Test { shared_ptr m_app_db; @@ -173,6 +237,169 @@ namespace portsorch_test }; + TEST_F(PortsOrchTest, PortSupportedFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + 
static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "none"} + }}); + consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + + ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + _unhook_sai_port_api(); + } + + /* + * Test case: SAI_PORT_ATTR_SUPPORTED_FEC_MODE is not supported by vendor + **/ + TEST_F(PortsOrchTest, PortNotSupportedFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + 
ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + /* + * Test case: Fetching SAI_PORT_ATTR_SUPPORTED_FEC_MODE is supported but no FEC mode is supported on the port + **/ + TEST_F(PortsOrchTest, PortSupportNoFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + auto old_mock_port_fec_modes = mock_port_fec_modes; + mock_port_fec_modes.clear(); + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + mock_port_fec_modes = old_mock_port_fec_modes; + _unhook_sai_port_api(); + } + TEST_F(PortsOrchTest, PortReadinessColdBoot) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); From 9f2e27b4740ae5395d3691ce8a0199e5fb35119c Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Tue, 9 Aug 2022 03:06:01 +0800 Subject: [PATCH 05/10] [QoS] Fix issue: the WRED profile can not be set if current min > new max or current max < new min 
(#2379) What I did Fix issue: Setting a WRED profile can fail in case the current min threshold is greater than the new max threshold or the current max threshold is less than the new min threshold for any color and at any time. Eg. Current min=1M, max=2M, new min=3M, new max=4M The min threshold is set first, so current max (2M) < new min (3M), which violates the condition This is because there can be only one attribute in each SAI SET operation, which means the vendor SAI does not understand the whole information of the new attributes and can only perform the sanity check against each SET operation. Signed-off-by: Stephen Sun stephens@nvidia.com Why I did it Fix the issue How I verified it Manual test and mock test. Details if related The fix The thresholds that have been applied to SAI will be stored in orchagent. The original logic is to handle each attribute to be set and append it to an attribute list. To resolve the issue, a deferred attribute list is introduced and will be appended to the original attribute list after all the attributes have been handled. In the new logic, each threshold to be set will be checked against the stored data. In case it violates the condition, the violating attribute will be deferred, done via putting it into the deferred attributes list. For any color, there can be only 1 threshold violating the condition. Otherwise, it means both new min > old max and new max > old min, which means either old max < old min or new max < new min, which means either old or new data is illegal. This can not happen because illegal data will not be applied and stored. By doing so, the other threshold will be applied first, which extends the threshold range and breaks the violating condition. A logic is also introduced to guarantee the min threshold is always less than the max threshold in the new profile to be set. 
For the above example, In the new logic, the min threshold will be deferred so the new max threshold will be applied first and then the new min is applied. In this flow, there is no violation at any time. min = 1M, max = 2M => min = 1M, max = 4M => min = 3M, max = 4M --- orchagent/qosorch.cpp | 141 +++++++++++-- orchagent/qosorch.h | 18 ++ tests/mock_tests/mock_orchagent_main.h | 2 +- tests/mock_tests/qosorch_ut.cpp | 267 +++++++++++++++++++++++++ 4 files changed, 409 insertions(+), 19 deletions(-) diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 274003bd72..14ca851e76 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -489,47 +489,140 @@ bool WredMapHandler::convertBool(string str, bool &val) return true; } +void WredMapHandler::appendThresholdToAttributeList(sai_attr_id_t type, + sai_uint32_t threshold, + bool needDefer, + vector &normalQueue, + vector &deferredQueue, + sai_uint32_t &newThreshold) +{ + sai_attribute_t attr; + + attr.id = type; + attr.value.u32 = threshold; + if (needDefer) + { + deferredQueue.push_back(attr); + } + else + { + normalQueue.push_back(attr); + } + newThreshold = threshold; +} + +WredMapHandler::qos_wred_thresholds_store_t WredMapHandler::m_wredProfiles; + bool WredMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attribs) { SWSS_LOG_ENTER(); sai_attribute_t attr; + vector deferred_attributes; + auto &key = kfvKey(tuple); + auto &storedProfile = WredMapHandler::m_wredProfiles[key]; + qos_wred_thresholds_t currentProfile = storedProfile; + sai_uint32_t threshold; + + /* + * Setting WRED profile can fail in case + * - the current min threshold is greater than the new max threshold + * - or the current max threshold is less than the new min threshold + * for any color at any time, on some vendor's platforms. 
+ * + * The root cause + * There can be only one attribute in each SAI SET operation, which means + * the vendor SAI do not have a big picture regarding what attributes are being set + * and can only perform the sanity check against each SET operation. + * In the above case, the sanity check will fail. + * + * The fix + * The thresholds that have been applied to SAI will be stored in orchagent. + * + * The original logic is to handle each attribute to be set and append it to an attribute list. + * To resolve the issue, a 2nd half attribute list is introduced and + * will be appended to the original attribute list after all the attributes have been handled. + * + * In the new logic, each threshold to be set will be checked against the stored data. + * In case it violates the condition, the violating attribute will be deferred, done via putting it into the 2nd half attributes list. + * + * For any color, there can be only 1 threshold violating the condition. + * Otherwise, it means both new min > old max and new max > old min, which means either old max < old min or new max < new min, + * which means either old or new data is illegal. + * This can not happen because illegal data can not be applied and stored. + * + * By doing so, the other threshold will be applied first, which extends the threshold range and breaks the violating condition. + * A logic is also introduced to guarantee the min threshold is always less than the max threshold in the new profile to be set. + * + * For example: + * Current min=1M, max=2M, new min=3M, new max=4M + * The min is set first, so current max (2M) < new min (3M), which violates the condition + * By the new logic, min threshold will be deferred so the new max will be applied first and then the new min is applied and no violating. 
+ * min = 1M, max = 2M + * => min = 1M, max = 4M + * => min = 3M, max = 4M + */ + for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++) { if (fvField(*i) == yellow_max_threshold_field_name) { - attr.id = SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD, + threshold, + (storedProfile.yellow_min_threshold > threshold), + attribs, + deferred_attributes, + currentProfile.yellow_max_threshold); } else if (fvField(*i) == yellow_min_threshold_field_name) { - attr.id = SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD, + threshold, + (storedProfile.yellow_max_threshold < threshold), + attribs, + deferred_attributes, + currentProfile.yellow_min_threshold); } else if (fvField(*i) == green_max_threshold_field_name) { - attr.id = SAI_WRED_ATTR_GREEN_MAX_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_GREEN_MAX_THRESHOLD, + threshold, + (storedProfile.green_min_threshold > threshold), + attribs, + deferred_attributes, + currentProfile.green_max_threshold); } else if (fvField(*i) == green_min_threshold_field_name) { - attr.id = SAI_WRED_ATTR_GREEN_MIN_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_GREEN_MIN_THRESHOLD, + threshold, + (storedProfile.green_max_threshold < threshold), + attribs, + deferred_attributes, + currentProfile.green_min_threshold); } else if (fvField(*i) == red_max_threshold_field_name) { - attr.id = SAI_WRED_ATTR_RED_MAX_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = 
stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_RED_MAX_THRESHOLD, + threshold, + (storedProfile.red_min_threshold > threshold), + attribs, + deferred_attributes, + currentProfile.red_max_threshold); } else if (fvField(*i) == red_min_threshold_field_name) { - attr.id = SAI_WRED_ATTR_RED_MIN_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_RED_MIN_THRESHOLD, + threshold, + (storedProfile.red_max_threshold < threshold), + attribs, + deferred_attributes, + currentProfile.red_min_threshold); } else if (fvField(*i) == green_drop_probability_field_name) { @@ -588,6 +681,18 @@ bool WredMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tupl return false; } } + + if ((currentProfile.green_min_threshold > currentProfile.green_max_threshold) + || (currentProfile.yellow_min_threshold > currentProfile.yellow_max_threshold) + || (currentProfile.red_min_threshold > currentProfile.red_max_threshold)) + { + SWSS_LOG_ERROR("Wrong wred profile: min threshold is greater than max threshold"); + return false; + } + + attribs.insert(attribs.end(), deferred_attributes.begin(), deferred_attributes.end()); + storedProfile = currentProfile; + return true; } diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index f8b97cd381..b5e2e1ad86 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -112,6 +112,24 @@ class WredMapHandler : public QosMapHandler protected: bool convertEcnMode(string str, sai_ecn_mark_mode_t &ecn_val); bool convertBool(string str, bool &val); +private: + void appendThresholdToAttributeList(sai_attr_id_t type, + sai_uint32_t threshold, + bool needDefer, + vector &normalQueue, + vector &deferredQueue, + sai_uint32_t &newThreshold); + typedef struct { + sai_uint32_t green_max_threshold; + sai_uint32_t green_min_threshold; + sai_uint32_t yellow_max_threshold; + sai_uint32_t yellow_min_threshold; + sai_uint32_t 
red_max_threshold; + sai_uint32_t red_min_threshold; + } qos_wred_thresholds_t; + typedef map qos_wred_thresholds_store_t; + + static qos_wred_thresholds_store_t m_wredProfiles; }; diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index f41c5b29a5..0acba4ef1c 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -12,8 +12,8 @@ #include "mirrororch.h" #define private public #include "bufferorch.h" -#undef private #include "qosorch.h" +#undef private #include "vrforch.h" #include "vnetorch.h" #include "vxlanorch.h" diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index 7461a29e3c..13454cee56 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -23,11 +23,14 @@ namespace qosorch_test int sai_remove_qos_map_count; int sai_remove_wred_profile_count; int sai_remove_scheduler_count; + int sai_set_wred_attribute_count; sai_object_id_t switch_dscp_to_tc_map_id; sai_remove_scheduler_fn old_remove_scheduler; sai_scheduler_api_t ut_sai_scheduler_api, *pold_sai_scheduler_api; + sai_create_wred_fn old_create_wred; sai_remove_wred_fn old_remove_wred; + sai_set_wred_attribute_fn old_set_wred_attribute; sai_wred_api_t ut_sai_wred_api, *pold_sai_wred_api; sai_remove_qos_map_fn old_remove_qos_map; sai_qos_map_api_t ut_sai_qos_map_api, *pold_sai_qos_map_api; @@ -50,6 +53,102 @@ namespace qosorch_test return rc; } + bool testing_wred_thresholds; + WredMapHandler::qos_wred_thresholds_t saiThresholds; + void _ut_stub_sai_check_wred_attributes(const sai_attribute_t &attr) + { + if (!testing_wred_thresholds) + { + return; + } + + switch (attr.id) + { + case SAI_WRED_ATTR_GREEN_MAX_THRESHOLD: + ASSERT_TRUE(!saiThresholds.green_min_threshold || saiThresholds.green_min_threshold < attr.value.u32); + saiThresholds.green_max_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_GREEN_MIN_THRESHOLD: + 
ASSERT_TRUE(!saiThresholds.green_max_threshold || saiThresholds.green_max_threshold > attr.value.u32); + saiThresholds.green_min_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD: + ASSERT_TRUE(!saiThresholds.yellow_min_threshold || saiThresholds.yellow_min_threshold < attr.value.u32); + saiThresholds.yellow_max_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD: + ASSERT_TRUE(!saiThresholds.yellow_max_threshold || saiThresholds.yellow_max_threshold > attr.value.u32); + saiThresholds.yellow_min_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_RED_MAX_THRESHOLD: + ASSERT_TRUE(!saiThresholds.red_min_threshold || saiThresholds.red_min_threshold < attr.value.u32); + saiThresholds.red_max_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_RED_MIN_THRESHOLD: + ASSERT_TRUE(!saiThresholds.red_max_threshold || saiThresholds.red_max_threshold > attr.value.u32); + saiThresholds.red_min_threshold = attr.value.u32; + break; + default: + break; + } + } + + void checkWredProfileEqual(const string &name, WredMapHandler::qos_wred_thresholds_t &thresholds) + { + auto &oaThresholds = WredMapHandler::m_wredProfiles[name]; + + ASSERT_EQ(oaThresholds.green_min_threshold, thresholds.green_min_threshold); + ASSERT_EQ(oaThresholds.green_max_threshold, thresholds.green_max_threshold); + ASSERT_EQ(oaThresholds.yellow_min_threshold, thresholds.yellow_min_threshold); + ASSERT_EQ(oaThresholds.yellow_max_threshold, thresholds.yellow_max_threshold); + ASSERT_EQ(oaThresholds.red_min_threshold, thresholds.red_min_threshold); + ASSERT_EQ(oaThresholds.red_max_threshold, thresholds.red_max_threshold); + } + + void updateWredProfileAndCheck(vector &thresholdsVector, WredMapHandler::qos_wred_thresholds_t &thresholdsValue) + { + std::deque entries; + entries.push_back({"AZURE", "SET", thresholdsVector}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + 
entries.clear(); + static_cast(gQosOrch)->doTask(); + checkWredProfileEqual("AZURE", saiThresholds); + checkWredProfileEqual("AZURE", thresholdsValue); + } + + void updateWrongWredProfileAndCheck(vector &thresholdsVector) + { + std::deque entries; + vector ts; + entries.push_back({"AZURE", "SET", thresholdsVector}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + auto current_sai_wred_set_count = sai_set_wred_attribute_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_wred_set_count, sai_set_wred_attribute_count); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + } + + sai_status_t _ut_stub_sai_create_wred( + _Out_ sai_object_id_t *wred_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + auto rc = old_create_wred(wred_id, switch_id, attr_count, attr_list); + if (rc == SAI_STATUS_SUCCESS) + { + for (uint32_t i = 0; i < attr_count; i++) + { + _ut_stub_sai_check_wred_attributes(attr_list[i]); + } + } + return rc; + } + sai_status_t _ut_stub_sai_remove_wred(sai_object_id_t wred_id) { auto rc = old_remove_wred(wred_id); @@ -58,6 +157,19 @@ namespace qosorch_test return rc; } + sai_status_t _ut_stub_sai_set_wred_attribute( + _In_ sai_object_id_t wred_id, + _In_ const sai_attribute_t *attr) + { + auto rc = old_set_wred_attribute(wred_id, attr); + if (rc == SAI_STATUS_SUCCESS) + { + _ut_stub_sai_check_wred_attributes(*attr); + } + sai_set_wred_attribute_count++; + return rc; + } + sai_status_t _ut_stub_sai_remove_scheduler(sai_object_id_t scheduler_id) { auto rc = old_remove_scheduler(scheduler_id); @@ -133,6 +245,13 @@ namespace qosorch_test ReplaceSaiRemoveApi(sai_wred_api, ut_sai_wred_api, pold_sai_wred_api, _ut_stub_sai_remove_wred, sai_wred_api->remove_wred, old_remove_wred, ut_sai_wred_api.remove_wred); + // Mock other wred APIs + old_create_wred = 
pold_sai_wred_api->create_wred; + ut_sai_wred_api.create_wred = _ut_stub_sai_create_wred; + old_set_wred_attribute = pold_sai_wred_api->set_wred_attribute; + ut_sai_wred_api.set_wred_attribute = _ut_stub_sai_set_wred_attribute; + + // Mock switch API pold_sai_switch_api = sai_switch_api; ut_sai_switch_api = *pold_sai_switch_api; old_set_switch_attribute_fn = pold_sai_switch_api->set_switch_attribute; @@ -1070,4 +1189,152 @@ namespace qosorch_test ASSERT_EQ(ts.size(), 1); ts.clear(); } + + /* + * There are 4 ECN ranges + * ------------------------------------------------------------------------------- + * profile lower min=1M max=2M + * profile upper min=3M max=4M + * profile middle min=1.5M max=2.5M + * ------------------------------------------------------------------------------- + * Test step Test case + * 1. Initialize a wred profile with value lower set Wred profile initialization + * 2. Update the value to upper set The new min threshold is greater than the current max threshold + * 3. Update the value back to lower set The new max threshold is less than the current min threshold + * 4. Update the value to middle set Normal case to ensure nothing broken + * 5. Update the value back to lower set Normal case to ensure nothing broken + */ + TEST_F(QosOrchTest, QosOrchTestWredThresholdsTest) + { + testing_wred_thresholds = true; + + // The order of fields matters when the wred profile is updated from the upper set to the lower set + // It should be max, min for each color.
In this order, the new max is less than the current min + // QoS orchagent should guarantee that the new min is configured first and then new max + vector lowerSetVector = { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097153"}, + {"yellow_min_threshold", "1048577"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097154"}, + {"red_min_threshold", "1048578"}, + {"wred_red_enable", "true"} + }; + WredMapHandler::qos_wred_thresholds_t lowerThresholds = { + 2097152, //green_max_threshold + 1048576, //green_min_threshold + 2097153, //yellow_max_threshold + 1048577, //yellow_min_threshold + 2097154, //red_max_threshold + 1048578 //red_min_threshold + }; + // The order of fields matters when the wred profile is updated from the lower set to the upper set + // It should be min, max for each color, in which the new min is larger than the current max + // QoS orchagent should guarantee that the new max is configured first and then new min + vector upperSetVector = { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_min_threshold", "3145728"}, + {"green_max_threshold", "4194304"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_min_threshold", "3145729"}, + {"yellow_max_threshold", "4194305"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_min_threshold", "3145730"}, + {"red_max_threshold", "4194306"}, + {"wred_red_enable", "true"} + }; + WredMapHandler::qos_wred_thresholds_t upperThresholds = { + 4194304, //green_max_threshold + 3145728, //green_min_threshold + 4194305, //yellow_max_threshold + 3145729, //yellow_min_threshold + 4194306, //red_max_threshold + 3145730 //red_min_threshold + }; + // Order doesn't matter.
+ vector middleSetVector = { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_min_threshold", "1572864"}, + {"green_max_threshold", "2621440"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_min_threshold", "1572865"}, + {"yellow_max_threshold", "2621441"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_min_threshold", "1572866"}, + {"red_max_threshold", "2621442"}, + {"wred_red_enable", "true"} + }; + WredMapHandler::qos_wred_thresholds_t middleThresholds = { + 2621440, //green_max_threshold + 1572864, //green_min_threshold + 2621441, //yellow_max_threshold + 1572865, //yellow_min_threshold + 2621442, //red_max_threshold + 1572866 //red_min_threshold + }; + + // Wrong profile + vector greenWrongVector = { + {"ecn", "ecn_green"}, + {"green_drop_probability", "5"}, + {"green_min_threshold", "2621440"}, + {"green_max_threshold", "1572864"}, + {"wred_green_enable", "true"} + }; + + vector yellowWrongVector = { + {"ecn", "ecn_yellow"}, + {"yellow_drop_probability", "5"}, + {"yellow_min_threshold", "2621441"}, + {"yellow_max_threshold", "1572865"}, + {"wred_yellow_enable", "true"} + }; + + vector redWrongVector = { + {"ecn", "ecn_red"}, + {"red_drop_probability", "5"}, + {"red_min_threshold", "2621442"}, + {"red_max_threshold", "1572866"}, + {"wred_red_enable", "true"} + }; + + std::deque entries; + // 1. Initialize + updateWredProfileAndCheck(lowerSetVector, lowerThresholds); + + // 2. Update the thresholds from the lower set to the upper set + updateWredProfileAndCheck(upperSetVector, upperThresholds); + + // 3. Update the thresholds from the upper set back to the lower set + updateWredProfileAndCheck(lowerSetVector, lowerThresholds); + + // 4. Update the thresholds from the lower set to the middle set + updateWredProfileAndCheck(middleSetVector, middleThresholds); + + // 5. 
Update the thresholds from the middle set back to the lower set + updateWredProfileAndCheck(lowerSetVector, lowerThresholds); + + // Wrong parameters + updateWrongWredProfileAndCheck(greenWrongVector); + updateWrongWredProfileAndCheck(yellowWrongVector); + updateWrongWredProfileAndCheck(redWrongVector); + + // Make sure the profiles in orchagent and SAI are not updated by the wrong profile + checkWredProfileEqual("AZURE", saiThresholds); + checkWredProfileEqual("AZURE", lowerThresholds); + + testing_wred_thresholds = false; + } } From d36c17d62c17b7ae2617868989891bc7bb8c72f8 Mon Sep 17 00:00:00 2001 From: Yakiv Huryk <62013282+Yakiv-Huryk@users.noreply.github.com> Date: Tue, 9 Aug 2022 03:06:24 +0300 Subject: [PATCH 06/10] [asan][aclorch] fix a memory leak in the SaiAttrWrapper::swap() (#2382) * fix a leak caused by overriding this->m_attr (which contained a dynamically allocated list) in the SaiAttrWrapper::swap() --- orchagent/saiattr.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/orchagent/saiattr.cpp b/orchagent/saiattr.cpp index 1c24489ed5..fb1d320fe4 100644 --- a/orchagent/saiattr.cpp +++ b/orchagent/saiattr.cpp @@ -66,12 +66,10 @@ sai_attr_id_t SaiAttrWrapper::getAttrId() const void SaiAttrWrapper::swap(SaiAttrWrapper&& other) { - m_objectType = other.m_objectType; - m_meta = other.m_meta; - m_attr = other.m_attr; - m_serializedAttr = other.m_serializedAttr; - other.m_attr = sai_attribute_t{}; - other.m_serializedAttr.clear(); + std::swap(m_objectType, other.m_objectType); + std::swap(m_meta, other.m_meta); + std::swap(m_attr, other.m_attr); + std::swap(m_serializedAttr, other.m_serializedAttr); } void SaiAttrWrapper::init( From 2cb70a320173a44a4b1e2006cc45ee658f03d83e Mon Sep 17 00:00:00 2001 From: Nikola Dancejic <26731235+Ndancejic@users.noreply.github.com> Date: Tue, 9 Aug 2022 09:24:39 -0700 Subject: [PATCH 07/10] [muxorch] return true if the nbr IP is in the skip_neighbors list (#2407) Orchagent was not programming 
neighbor IP to h/w L3 host table during initialization because they were set as Standby. When the links became active, SoC IPs were added to the skip_neighbors_ list and never programmed. fix: - Added public method MuxCable::getSkipNeighborsSet() - check if nbr is in skip_neighbors_ when checking if cable is Active Signed-off-by: Nikola Dancejic Co-authored-by: Ubuntu --- orchagent/muxorch.cpp | 4 ++++ orchagent/muxorch.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 6770a4defb..baa0495a60 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -962,6 +962,10 @@ bool MuxOrch::isNeighborActive(const IpAddress& nbr, const MacAddress& mac, stri if (ptr) { + if (ptr->getSkipNeighborsSet().find(nbr) != ptr->getSkipNeighborsSet().end()) + { + return true; + } return ptr->isActive(); } diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 011d61b924..c9b09fd823 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -97,6 +97,10 @@ class MuxCable { return nbr_handler_->getNextHopId(nh); } + std::set getSkipNeighborsSet() + { + return skip_neighbors_; + } private: bool stateActive(); From 8dae35645962b0ce8b9b4ddc80d64162b51b5f65 Mon Sep 17 00:00:00 2001 From: oleksandrx-kolomeiets Date: Tue, 9 Aug 2022 20:10:13 +0300 Subject: [PATCH 08/10] [fdborch] Fix FDB flush issues (#2136) * [fdborch] Fix FDB flush issues Signed-off-by: Oleksandr Kolomeiets * Fix coverage * Fix test_FdbAddedAfterMemberCreated. * Fix flush_syncd_notif_ut.cpp. 
--- orchagent/fdborch.cpp | 31 ++++++++++++++++--- orchagent/fdborch.h | 1 + .../fdborch/flush_syncd_notif_ut.cpp | 24 ++++++++++++++ 3 files changed, 52 insertions(+), 4 deletions(-) diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index a401148836..720d6c5c66 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -219,7 +219,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, for (auto itr = m_entries.begin(); itr != m_entries.end();) { auto curr = itr++; - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac)) + if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -233,7 +233,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, auto curr = itr++; if (curr->second.bridge_port_id == bridge_port_id) { - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac)) + if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -248,7 +248,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, auto curr = itr++; if (curr->first.bv_id == bv_id) { - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac)) + if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -263,7 +263,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, auto curr = itr++; if (curr->first.bv_id == bv_id && curr->second.bridge_port_id == bridge_port_id) { - if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac)) + if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) { clearFdbEntry(curr->first); } @@ -819,6 +819,7 @@ void FdbOrch::doTask(Consumer& consumer) 
fdbData.remote_ip = remote_ip; fdbData.esi = esi; fdbData.vni = vni; + fdbData.is_flush_pending = false; if (addFdbEntry(entry, port, fdbData)) { if (origin == FDB_ORIGIN_MCLAG_ADVERTIZED) @@ -907,6 +908,14 @@ void FdbOrch::doTask(NotificationConsumer& consumer) SWSS_LOG_ERROR("Flush fdb failed, return code %x", status); } + if (status == SAI_STATUS_SUCCESS) { + for (map::iterator it = m_entries.begin(); + it != m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + } + return; } else if (op == "PORT") @@ -1071,6 +1080,20 @@ void FdbOrch::flushFDBEntries(sai_object_id_t bridge_port_oid, { SWSS_LOG_ERROR("Flushing FDB failed. rv:%d", rv); } + + if (SAI_STATUS_SUCCESS == rv) { + for (map::iterator it = m_entries.begin(); + it != m_entries.end(); it++) + { + if ((bridge_port_oid != SAI_NULL_OBJECT_ID && + it->second.bridge_port_id == bridge_port_oid) || + (vlan_oid != SAI_NULL_OBJECT_ID && + it->first.bv_id == vlan_oid)) + { + it->second.is_flush_pending = true; + } + } + } } void FdbOrch::notifyObserversFDBFlush(Port &port, sai_object_id_t& bvid) diff --git a/orchagent/fdborch.h b/orchagent/fdborch.h index 949ffbf289..d9f7398237 100644 --- a/orchagent/fdborch.h +++ b/orchagent/fdborch.h @@ -57,6 +57,7 @@ struct FdbData {"static", FDB_ORIGIN_PROVISIONED} => statically provisioned {"static", FDB_ORIGIN_ADVERTIZED} => sticky synced from remote device */ + bool is_flush_pending; /* Remote FDB related info */ string remote_ip; diff --git a/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp index 7bf22373c1..d0f8954fd8 100644 --- a/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp +++ b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp @@ -184,6 +184,10 @@ namespace fdb_syncd_flush_test /* Event 2: Generate a FDB Flush per port and per vlan */ vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + 
it->second.is_flush_pending = true; + } triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); @@ -228,6 +232,10 @@ namespace fdb_syncd_flush_test /* Event2: Send a Consolidated Flush response from syncd */ vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, SAI_NULL_OBJECT_ID, SAI_NULL_OBJECT_ID); @@ -272,6 +280,10 @@ namespace fdb_syncd_flush_test /* Event2: Send a Consolidated Flush response from syncd for vlan */ vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, SAI_NULL_OBJECT_ID, m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); @@ -316,6 +328,10 @@ namespace fdb_syncd_flush_test /* Event2: Send a Consolidated Flush response from syncd for a port */ vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, SAI_NULL_OBJECT_ID); @@ -366,6 +382,10 @@ namespace fdb_syncd_flush_test /* Event 2: Generate a FDB Flush per port and per vlan */ vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, bridge_port_oid, m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); @@ -410,6 +430,10 @@ namespace 
fdb_syncd_flush_test /* Event 2: Generate a non-consilidated FDB Flush per port and per vlan */ vector flush_mac_addr = {124, 254, 144, 18, 34, 236}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); From 7ddde97483508e0b5c9c18872da67767ae7d1814 Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Wed, 10 Aug 2022 13:12:50 -0700 Subject: [PATCH 09/10] Set internal class state to reflect the actual state (#2410) *Set internal class variable to set to STANDBY. *Default value was set to INIT and not modified after setting to standby. --- orchagent/muxorch.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index baa0495a60..afc5ec4120 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -367,6 +367,7 @@ MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress /* Set initial state to "standby" */ stateStandby(); + state_ = MuxState::MUX_STATE_STANDBY; } bool MuxCable::stateInitActive() From fd0c585d34de63dc9cca57bb7d508cadfe1125d4 Mon Sep 17 00:00:00 2001 From: vmittal-msft <46945843+vmittal-msft@users.noreply.github.com> Date: Wed, 10 Aug 2022 16:08:12 -0700 Subject: [PATCH 10/10] PFCWD recovery changes using DLR_INIT (#2316) --- orchagent/orchdaemon.cpp | 27 +++++++--- orchagent/pfcactionhandler.cpp | 43 +++++++++++++++ orchagent/pfcactionhandler.h | 8 +++ orchagent/pfcwdorch.cpp | 34 ++++++++++++ orchagent/pfcwdorch.h | 3 ++ orchagent/qosorch.cpp | 2 +- orchagent/switchorch.cpp | 24 ++++++++- orchagent/switchorch.h | 6 ++- tests/mock_tests/portsorch_ut.cpp | 90 ++++++++++++++++++++++++++++++- 9 files changed, 226 insertions(+), 11 deletions(-) diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 
934f1f5d19..1cc3127c31 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -597,13 +597,26 @@ bool OrchDaemon::init() SAI_QUEUE_ATTR_PAUSE_STATUS, }; - m_orchList.push_back(new PfcWdSwOrch( - m_configDb, - pfc_wd_tables, - portStatIds, - queueStatIds, - queueAttrIds, - PFC_WD_POLL_MSECS)); + if(gSwitchOrch->checkPfcDlrInitEnable()) + { + m_orchList.push_back(new PfcWdSwOrch( + m_configDb, + pfc_wd_tables, + portStatIds, + queueStatIds, + queueAttrIds, + PFC_WD_POLL_MSECS)); + } + else + { + m_orchList.push_back(new PfcWdSwOrch( + m_configDb, + pfc_wd_tables, + portStatIds, + queueStatIds, + queueAttrIds, + PFC_WD_POLL_MSECS)); + } } else if (platform == CISCO_8000_PLATFORM_SUBSTRING) { static const vector portStatIds; diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp index f7dc20ef26..dee433bcd2 100644 --- a/orchagent/pfcactionhandler.cpp +++ b/orchagent/pfcactionhandler.cpp @@ -262,6 +262,49 @@ PfcWdSaiDlrInitHandler::~PfcWdSaiDlrInitHandler(void) } } +PfcWdDlrHandler::PfcWdDlrHandler(sai_object_id_t port, sai_object_id_t queue, + uint8_t queueId, shared_ptr
countersTable): + PfcWdLossyHandler(port, queue, queueId, countersTable) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_QUEUE_ATTR_PFC_DLR_INIT; + attr.value.booldata = true; + + // Set DLR init to true to start PFC deadlock recovery + sai_status_t status = sai_queue_api->set_queue_attribute(queue, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set PFC DLR INIT on port 0x%" PRIx64 " queue 0x%" PRIx64 + " queueId %d : %d", + port, queue, queueId, status); + return; + } +} + +PfcWdDlrHandler::~PfcWdDlrHandler(void) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t port = getPort(); + sai_object_id_t queue = getQueue(); + uint8_t queueId = getQueueId(); + + sai_attribute_t attr; + attr.id = SAI_QUEUE_ATTR_PFC_DLR_INIT; + attr.value.booldata = false; + + // Set DLR init to false to stop PFC deadlock recovery + sai_status_t status = sai_queue_api->set_queue_attribute(getQueue(), &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to clear PFC DLR INIT on port 0x%" PRIx64 " queue 0x%" PRIx64 + " queueId %d : %d", port, queue, queueId, status); + return; + } +} + PfcWdAclHandler::PfcWdAclHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr
countersTable): PfcWdLossyHandler(port, queue, queueId, countersTable) diff --git a/orchagent/pfcactionhandler.h b/orchagent/pfcactionhandler.h index d32df433f7..acfc923423 100644 --- a/orchagent/pfcactionhandler.h +++ b/orchagent/pfcactionhandler.h @@ -115,6 +115,14 @@ class PfcWdAclHandler: public PfcWdLossyHandler void updatePfcAclRule(shared_ptr rule, uint8_t queueId, string strTable, vector port); }; +class PfcWdDlrHandler: public PfcWdLossyHandler +{ + public: + PfcWdDlrHandler(sai_object_id_t port, sai_object_id_t queue, + uint8_t queueId, shared_ptr
countersTable); + virtual ~PfcWdDlrHandler(void); +}; + // PFC queue that implements drop action by draining queue with buffer of zero size class PfcWdZeroBufferHandler: public PfcWdLossyHandler { diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index 62765ab0a1..da092387af 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -27,9 +27,12 @@ #define PFC_WD_TC_MAX 8 #define COUNTER_CHECK_POLL_TIMEOUT_SEC 1 +extern sai_object_id_t gSwitchId; +extern sai_switch_api_t* sai_switch_api; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; +extern SwitchOrch *gSwitchOrch; extern PortsOrch *gPortsOrch; template @@ -229,6 +232,36 @@ task_process_status PfcWdOrch::createEntry(const st SWSS_LOG_ERROR("Unsupported action %s for platform %s", value.c_str(), m_platform.c_str()); return task_process_status::task_invalid_entry; } + if(m_platform == BRCM_PLATFORM_SUBSTRING) + { + if(gSwitchOrch->checkPfcDlrInitEnable()) + { + if(getPfcDlrPacketAction() == PfcWdAction::PFC_WD_ACTION_UNKNOWN) + { + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_PFC_DLR_PACKET_ACTION; + attr.value.u32 = (sai_uint32_t)action; + + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if(status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set switch level PFC DLR packet action rv : %d", status); + return task_process_status::task_invalid_entry; + } + setPfcDlrPacketAction(action); + } + else + { + if(getPfcDlrPacketAction() != action) + { + string DlrPacketAction = serializeAction(getPfcDlrPacketAction()); + SWSS_LOG_ERROR("Invalid PFC Watchdog action %s as switch level action %s is set", + value.c_str(), DlrPacketAction.c_str()); + return task_process_status::task_invalid_entry; + } + } + } + } } else { @@ -1064,4 +1097,5 @@ bool PfcWdSwOrch::bake() // Trick to keep member functions in a separate file template class PfcWdSwOrch; template class PfcWdSwOrch; +template class PfcWdSwOrch; template class PfcWdSwOrch; 
diff --git a/orchagent/pfcwdorch.h b/orchagent/pfcwdorch.h index 4013ab9ad5..63be1be036 100644 --- a/orchagent/pfcwdorch.h +++ b/orchagent/pfcwdorch.h @@ -49,6 +49,8 @@ class PfcWdOrch: public Orch virtual task_process_status createEntry(const string& key, const vector& data); task_process_status deleteEntry(const string& name); + PfcWdAction getPfcDlrPacketAction() { return PfcDlrPacketAction; } + void setPfcDlrPacketAction(PfcWdAction action) { PfcDlrPacketAction = action; } protected: virtual bool startWdActionOnQueue(const string &event, sai_object_id_t queueId) = 0; @@ -58,6 +60,7 @@ class PfcWdOrch: public Orch shared_ptr m_countersDb = nullptr; shared_ptr
m_countersTable = nullptr; + PfcWdAction PfcDlrPacketAction = PfcWdAction::PFC_WD_ACTION_UNKNOWN; }; template diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 14ca851e76..c6d7bff842 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -1775,7 +1775,7 @@ bool QosOrch::applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t ma SWSS_LOG_ENTER(); /* Query DSCP_TO_TC QoS map at switch capability */ - bool rv = gSwitchOrch->querySwitchDscpToTcCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP); + bool rv = gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP); if (rv == false) { SWSS_LOG_ERROR("Switch level DSCP to TC QoS map configuration is not supported"); diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 48ecd1fd35..2ca291c162 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -48,6 +48,27 @@ const map packet_action_map = const std::set switch_non_sai_attribute_set = {"ordered_ecmp"}; +void SwitchOrch::set_switch_pfc_dlr_init_capability() +{ + vector fvVector; + + /* Query PFC DLR INIT capability */ + bool rv = querySwitchCapability(SAI_OBJECT_TYPE_QUEUE, SAI_QUEUE_ATTR_PFC_DLR_INIT); + if (rv == false) + { + SWSS_LOG_INFO("Queue level PFC DLR INIT configuration is not supported"); + m_PfcDlrInitEnable = false; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE, "false"); + } + else + { + SWSS_LOG_INFO("Queue level PFC DLR INIT configuration is supported"); + m_PfcDlrInitEnable = true; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE, "true"); + } + set_switch_capability(fvVector); +} + SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, TableConnector switchTable): Orch(connectors), m_switchTable(switchTable.first, switchTable.second), @@ -60,6 +81,7 @@ SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, Tabl auto restartCheckNotifier = new 
Notifier(m_restartCheckNotificationConsumer, this, "RESTARTCHECK"); Orch::addExecutor(restartCheckNotifier); + set_switch_pfc_dlr_init_capability(); initSensorsTable(); querySwitchTpidCapability(); auto executorT = new ExecutableTimer(m_sensorsPollerTimer, this, "ASIC_SENSORS_POLL_TIMER"); @@ -762,7 +784,7 @@ void SwitchOrch::querySwitchTpidCapability() } } -bool SwitchOrch::querySwitchDscpToTcCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id) +bool SwitchOrch::querySwitchCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id) { SWSS_LOG_ENTER(); diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 5b09a67640..e1e39e5b85 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -11,6 +11,7 @@ #define SWITCH_CAPABILITY_TABLE_PORT_TPID_CAPABLE "PORT_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_LAG_TPID_CAPABLE "LAG_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE "PFC_DLR_INIT_CAPABLE" struct WarmRestartCheck { @@ -30,7 +31,9 @@ class SwitchOrch : public Orch void restartCheckReply(const std::string &op, const std::string &data, std::vector &values); bool setAgingFDB(uint32_t sec); void set_switch_capability(const std::vector& values); - bool querySwitchDscpToTcCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id); + bool querySwitchCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id); + bool checkPfcDlrInitEnable() { return m_PfcDlrInitEnable; } + void set_switch_pfc_dlr_init_capability(); // Return reference to ACL group created for each stage and the bind point is // the switch @@ -80,6 +83,7 @@ class SwitchOrch : public Orch bool m_sensorsAvgTempSupported = true; bool m_vxlanSportUserModeEnabled = false; bool m_orderedEcmpEnable = false; + bool m_PfcDlrInitEnable = false; // Information contained in the request from // external program for orchagent pre-shutdown state check diff --git 
a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 93e73d9041..6425dca20f 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -17,7 +17,6 @@ extern redisReply *mockReply; namespace portsorch_test { - using namespace std; sai_port_api_t ut_sai_port_api; @@ -83,6 +82,39 @@ namespace portsorch_test { sai_port_api = pold_sai_port_api; } + + sai_queue_api_t ut_sai_queue_api; + sai_queue_api_t *pold_sai_queue_api; + int _sai_set_queue_attr_count = 0; + + sai_status_t _ut_stub_sai_set_queue_attribute(sai_object_id_t queue_id, const sai_attribute_t *attr) + { + if(attr->id == SAI_QUEUE_ATTR_PFC_DLR_INIT) + { + if(attr->value.booldata == true) + { + _sai_set_queue_attr_count++; + } + else + { + _sai_set_queue_attr_count--; + } + } + return SAI_STATUS_SUCCESS; + } + + void _hook_sai_queue_api() + { + ut_sai_queue_api = *sai_queue_api; + pold_sai_queue_api = sai_queue_api; + ut_sai_queue_api.set_queue_attribute = _ut_stub_sai_set_queue_attribute; + sai_queue_api = &ut_sai_queue_api; + } + + void _unhook_sai_queue_api() + { + sai_queue_api = pold_sai_queue_api; + } struct PortsOrchTest : public ::testing::Test { @@ -588,6 +620,61 @@ namespace portsorch_test ASSERT_TRUE(ts.empty()); } + TEST_F(PortsOrchTest, PfcDlrHandlerCallingDlrInitAttribute) + { + _hook_sai_queue_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", 
to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + + static_cast(gPortsOrch)->doTask(); + + // Apply configuration + // ports + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + vector ts; + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + ts.clear(); + + // Simulate storm drop handler started on Ethernet0 TC 3 + Port port; + gPortsOrch->getPort("Ethernet0", port); + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + ASSERT_TRUE(_sai_set_queue_attr_count == 1); + + dropHandler.reset(); + ASSERT_FALSE(_sai_set_queue_attr_count == 1); + + _unhook_sai_queue_api(); + } + TEST_F(PortsOrchTest, PfcZeroBufferHandler) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); @@ -1026,4 +1113,5 @@ namespace portsorch_test ASSERT_FALSE(bridgePortCalledBeforeLagMember); // bridge port created on lag before lag member was created } + }