diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp
index f79c5112e8..ee22cd37c9 100644
--- a/cfgmgr/buffermgr.cpp
+++ b/cfgmgr/buffermgr.cpp
@@ -11,6 +11,7 @@
 #include "exec.h"
 #include "shellcmd.h"
 #include "warm_restart.h"
+#include "converter.h"

 using namespace std;
 using namespace swss;
@@ -133,11 +134,11 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer (
         }
     }
 */
-task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up)
+task_process_status BufferMgr::doSpeedUpdateTask(string port)
 {
-    vector<FieldValueTuple> fvVectorPg, fvVectorProfile;
     string cable;
     string speed;
+    string pfc_enable;

     if (m_cableLenLookup.count(port) == 0)
     {
@@ -152,9 +153,24 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up)
         return task_process_status::task_success;
     }

-    speed = m_speedLookup[port];
+    if (m_portStatusLookup.count(port) == 0)
+    {
+        // admin_status is not available yet. This can happen when the notification of the
+        // `PORT_QOS_MAP` table comes first.
+        SWSS_LOG_INFO("admin_status is not available for port %s", port.c_str());
+        return task_process_status::task_need_retry;
+    }

-    string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + LOSSLESS_PGS;
+    if (m_portPfcStatus.count(port) == 0)
+    {
+        // PORT_QOS_MAP is not ready yet. The notification is cleared, and the buffer PG
+        // will be handled when `pfc_enable` in the `PORT_QOS_MAP` table is available.
+        SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str());
+        return task_process_status::task_success;
+    }
+    pfc_enable = m_portPfcStatus[port];

+    speed = m_speedLookup[port];
+
     // key format is pg_lossless_<speed>_<cable>_profile
     string buffer_profile_key = "pg_lossless_" + speed + "_" + cable + "_profile";
     string profile_ref = string("[") +
@@ -163,25 +179,50 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up)
                          buffer_profile_key +
                          "]";

-    m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg);
+
+    vector<string> lossless_pgs = tokenize(pfc_enable, ',');
+    // Convert the lossless PG list to a bitmap
+    unsigned long lossless_pg_id = 0;
+    for (auto pg : lossless_pgs)
+    {
+        try
+        {
+            uint8_t cur_pg = to_uint<uint8_t>(pg);
+            lossless_pg_id |= (1 << cur_pg);
+        }
+        catch (const std::invalid_argument &e)
+        {
+            // Ignore malformed PG indices in pfc_enable
+            continue;
+        }
+    }
+    // Expand the bitmap into PG range strings, e.g. 0x18 -> "3-4"
+    vector<string> lossless_pg_combinations = generateIdListFromMap(lossless_pg_id, sizeof(lossless_pg_id));

-    if (!admin_up && m_platform == "mellanox")
+    if (m_portStatusLookup[port] == "down" && m_platform == "mellanox")
     {
-        // Remove the entry in BUFFER_PG table if any
-        if (!fvVectorPg.empty())
+        for (auto lossless_pg : lossless_pg_combinations)
         {
-            for (auto &prop : fvVectorPg)
+            // Remove the entry in BUFFER_PG table if any
+            vector<FieldValueTuple> fvVectorPg;
+            string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg;
+
+            m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg);
+            if (!fvVectorPg.empty())
             {
-                if (fvField(prop) == "profile")
+                for (auto &prop : fvVectorPg)
                 {
-                    if (fvValue(prop) == profile_ref)
+                    if (fvField(prop) == "profile")
                     {
-                        SWSS_LOG_NOTICE("Removing PG %s from port %s which is administrative down", buffer_pg_key.c_str(), port.c_str());
-                        m_cfgBufferPgTable.del(buffer_pg_key);
-                    }
-                    else
-                    {
-                        SWSS_LOG_NOTICE("Not default profile %s is configured on PG %s, won't reclaim buffer", fvValue(prop).c_str(), buffer_pg_key.c_str());
+                        if (fvValue(prop) == profile_ref)
+                        {
+                            SWSS_LOG_NOTICE("Removing PG %s from port %s which is administratively down", buffer_pg_key.c_str(), port.c_str());
+                            m_cfgBufferPgTable.del(buffer_pg_key);
+                        }
+                        else
+                        {
+                            SWSS_LOG_NOTICE("Non-default profile %s is configured on PG %s, won't reclaim buffer",
+                                            fvValue(prop).c_str(), buffer_pg_key.c_str());
+                        }
                     }
                 }
            }
@@ -189,14 +230,15 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up)

         return task_process_status::task_success;
     }
-
+
     if (m_pgProfileLookup.count(speed) == 0 || m_pgProfileLookup[speed].count(cable) == 0)
     {
-        SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s",
-                       port.c_str(), speed.c_str(), cable.c_str());
-        return task_process_status::task_invalid_entry;
+        SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s",
+                        port.c_str(), speed.c_str(), cable.c_str());
+        return task_process_status::task_invalid_entry;
     }

+    vector<FieldValueTuple> fvVectorProfile;
     // check if profile already exists - if yes - skip creation
     m_cfgBufferProfileTable.get(buffer_profile_key, fvVectorProfile);
     // Create record in BUFFER_PROFILE table
@@ -233,22 +275,34 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up)
     {
         SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str());
     }
-
-    /* Check if PG Mapping is already then log message and return. */
-    for (auto& prop : fvVectorPg)
+
+    for (auto lossless_pg : lossless_pg_combinations)
     {
-        if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop)))
+        vector<FieldValueTuple> fvVectorPg;
+        string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg;
+
+        m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg);
+        bool profile_existing = false;
+        /* If the PG-to-profile mapping is already present, log a message and skip this PG. */
+        for (auto& prop : fvVectorPg)
         {
-            SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str());
-            return task_process_status::task_success;
+            if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop)))
+            {
+                SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str());
+                profile_existing = true;
+                break;
+            }
         }
-    }
-
-    fvVectorPg.clear();
+        if (profile_existing)
+        {
+            continue;
+        }
+        fvVectorPg.clear();

-    fvVectorPg.push_back(make_pair("profile", profile_ref));
-    SWSS_LOG_INFO("Setting buffer profile to PG %s", buffer_pg_key.c_str());
-    m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg);
+        fvVectorPg.push_back(make_pair("profile", profile_ref));
+        m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg);
+        SWSS_LOG_INFO("Setting buffer profile to PG %s", buffer_pg_key.c_str());
+    }

     return task_process_status::task_success;
 }
@@ -388,6 +442,47 @@ void BufferMgr::doBufferMetaTask(Consumer &consumer)
     }
 }

+/*
+Parse PORT_QOS_MAP to retrieve the queues on which PFC is enabled, and
+cache the result in a map
+*/
+void BufferMgr::doPortQosTableTask(Consumer &consumer)
+{
+    SWSS_LOG_ENTER();
+
+    auto it = consumer.m_toSync.begin();
+    while (it != consumer.m_toSync.end())
+    {
+        KeyOpFieldsValuesTuple tuple = it->second;
+        string port_name = kfvKey(tuple);
+        string op = kfvOp(tuple);
+        if (op == SET_COMMAND)
+        {
+            bool update_pfc_enable = false;
+            for (auto itp : kfvFieldsValues(tuple))
+            {
+                if (fvField(itp) == "pfc_enable")
+                {
+                    if (m_portPfcStatus.count(port_name) == 0 || m_portPfcStatus[port_name] != fvValue(itp))
+                    {
+                        m_portPfcStatus[port_name] = fvValue(itp);
+                        update_pfc_enable = true;
+                    }
+                    SWSS_LOG_INFO("Got pfc enable status for port %s status %s", port_name.c_str(), fvValue(itp).c_str());
+                    break;
+                }
+            }
+            if (update_pfc_enable)
+            {
+                // The return status is ignored
+                doSpeedUpdateTask(port_name);
+            }
+        }
+        it = consumer.m_toSync.erase(it);
+    }
+}
+
 void BufferMgr::doTask(Consumer &consumer)
 {
     SWSS_LOG_ENTER();
@@ -441,6 +536,12 @@ void BufferMgr::doTask(Consumer &consumer)
         return;
     }

+    if (table_name == CFG_PORT_QOS_MAP_TABLE_NAME)
+    {
+        doPortQosTableTask(consumer);
+        return;
+    }
+
     auto it = consumer.m_toSync.begin();
     while (it != consumer.m_toSync.end())
     {
@@ -464,7 +565,6 @@ void BufferMgr::doTask(Consumer &consumer)
         }
         else if (m_pgfile_processed && table_name == CFG_PORT_TABLE_NAME)
         {
-            bool admin_up = false;
             for (auto i : kfvFieldsValues(t))
             {
                 if (fvField(i) == "speed")
@@ -473,37 +573,37 @@ void BufferMgr::doTask(Consumer &consumer)
                 }
                 if (fvField(i) == "admin_status")
                 {
-                    admin_up = ("up" == fvValue(i));
+                    m_portStatusLookup[port] = fvValue(i);
                 }
             }

             if (m_speedLookup.count(port) != 0)
             {
                 // create/update profile for port
-                SWSS_LOG_DEBUG("Port %s Speed %s admin status %d", port.c_str(), m_speedLookup[port].c_str(), admin_up);
-                task_status = doSpeedUpdateTask(port, admin_up);
+                SWSS_LOG_DEBUG("Port %s Speed %s admin status %s", port.c_str(), m_speedLookup[port].c_str(), m_portStatusLookup[port].c_str());
+                task_status = doSpeedUpdateTask(port);
                 SWSS_LOG_DEBUG("Return code for doSpeedUpdateTask %d", task_status);
             }
-        }

-        switch (task_status)
-        {
-        case task_process_status::task_failed:
-            SWSS_LOG_ERROR("Failed to process table update");
-            return;
-        case task_process_status::task_need_retry:
-            SWSS_LOG_INFO("Unable to process table update. Will retry...");
-            ++it;
-            break;
-        case task_process_status::task_invalid_entry:
-            SWSS_LOG_ERROR("Failed to process invalid entry, drop it");
-            it = consumer.m_toSync.erase(it);
-            break;
-        default:
-            it = consumer.m_toSync.erase(it);
-            break;
+            switch (task_status)
+            {
+            case task_process_status::task_failed:
+                SWSS_LOG_ERROR("Failed to process table update");
+                return;
+            case task_process_status::task_need_retry:
+                SWSS_LOG_INFO("Unable to process table update. Will retry...");
+                ++it;
+                break;
+            case task_process_status::task_invalid_entry:
+                SWSS_LOG_ERROR("Failed to process invalid entry, drop it");
+                it = consumer.m_toSync.erase(it);
+                break;
+            default:
+                it = consumer.m_toSync.erase(it);
+                break;
+            }
         }
     }
 }
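
[Editor's note] The range expansion above relies on generateIdListFromMap(), whose implementation is not part of this patch. Below is a minimal sketch of the assumed behavior, collapsing the PG bitmap into contiguous-range strings such as "3-4"; the helper name idListFromBitmap and its signature are illustrative, not the library API.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Collapse set bits into range strings, e.g. pfc_enable "2,3,4,6"
    // -> bitmap 0x5c -> {"2-4", "6"}. Hypothetical stand-in for
    // generateIdListFromMap().
    static std::vector<std::string> idListFromBitmap(unsigned long bitmap, size_t bytes)
    {
        std::vector<std::string> ranges;
        const size_t bits = bytes * 8;
        for (size_t i = 0; i < bits; i++)
        {
            if (!(bitmap & (1UL << i)))
                continue;
            size_t start = i;
            while (i + 1 < bits && (bitmap & (1UL << (i + 1))))
                i++;
            ranges.push_back(start == i ? std::to_string(start)
                                        : std::to_string(start) + "-" + std::to_string(i));
        }
        return ranges;
    }

    int main()
    {
        for (const auto &r : idListFromBitmap(0x5c, sizeof(unsigned long)))
            std::printf("%s\n", r.c_str());  // prints "2-4" then "6"
        return 0;
    }

This matches the expectation in tests/test_buffer_traditional.py further down, where pfc_enable '2,3,4,6' yields the BUFFER_PG keys "2-4" and "6".
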
diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h
index d8da025616..54e1acb1e8 100644
--- a/cfgmgr/buffermgr.h
+++ b/cfgmgr/buffermgr.h
@@ -11,7 +11,6 @@ namespace swss {

 #define INGRESS_LOSSLESS_PG_POOL_NAME "ingress_lossless_pool"
-#define LOSSLESS_PGS "3-4"

 #define BUFFERMGR_TIMER_PERIOD 10

@@ -28,6 +27,8 @@ typedef std::map<std::string, speed_map_t> pg_profile_lookup_t;

 typedef std::map<std::string, std::string> port_cable_length_t;
 typedef std::map<std::string, std::string> port_speed_t;
+typedef std::map<std::string, std::string> port_pfc_status_t;
+typedef std::map<std::string, std::string> port_admin_status_t;

 class BufferMgr : public Orch
 {
@@ -56,11 +57,12 @@ class BufferMgr : public Orch

     pg_profile_lookup_t m_pgProfileLookup;
     port_cable_length_t m_cableLenLookup;
+    port_admin_status_t m_portStatusLookup;
     port_speed_t m_speedLookup;
     std::string getPgPoolMode();
     void readPgProfileLookupFile(std::string);
     task_process_status doCableTask(std::string port, std::string cable_length);
-    task_process_status doSpeedUpdateTask(std::string port, bool admin_up);
+    task_process_status doSpeedUpdateTask(std::string port);
     void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable);

     void transformSeperator(std::string &name);
@@ -68,6 +70,9 @@ class BufferMgr : public Orch

     void doTask(Consumer &consumer);
     void doBufferMetaTask(Consumer &consumer);
+
+    port_pfc_status_t m_portPfcStatus;
+    void doPortQosTableTask(Consumer &consumer);
 };

 }
diff --git a/cfgmgr/buffermgrd.cpp b/cfgmgr/buffermgrd.cpp
index 71cff8d6c2..9926596d9e 100644
--- a/cfgmgr/buffermgrd.cpp
+++ b/cfgmgr/buffermgrd.cpp
@@ -210,7 +210,8 @@ int main(int argc, char **argv)
                 CFG_BUFFER_QUEUE_TABLE_NAME,
                 CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME,
                 CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME,
-                CFG_DEVICE_METADATA_TABLE_NAME
+                CFG_DEVICE_METADATA_TABLE_NAME,
+                CFG_PORT_QOS_MAP_TABLE_NAME
             };
             cfgOrchList.emplace_back(new BufferMgr(&cfgDb, &applDb, pg_lookup_file, cfg_buffer_tables));
         }
diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp
index be4c1e51c4..62765ab0a1 100644
--- a/orchagent/pfcwdorch.cpp
+++ b/orchagent/pfcwdorch.cpp
@@ -399,9 +399,9 @@ void PfcWdSwOrch<DropHandler, ForwardHandler>::enableBigRedSwitchMode()
             continue;
         }

-        if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask))
+        if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask))
         {
-            SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str());
+            SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str());
             return;
         }

@@ -443,9 +443,9 @@ void PfcWdSwOrch<DropHandler, ForwardHandler>::enableBigRedSwitchMode()
             continue;
         }

-        if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask))
+        if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask))
         {
-            SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str());
+            SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str());
             return;
         }

@@ -489,7 +489,7 @@ bool PfcWdSwOrch<DropHandler, ForwardHandler>::registerInWdDb(const Port& port,

     uint8_t pfcMask = 0;

-    if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask))
+    if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask))
     {
         SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str());
         return false;
     }
diff --git a/orchagent/port.h b/orchagent/port.h
index ad77f0a5d2..a2a1ce30b2 100644
--- a/orchagent/port.h
+++ b/orchagent/port.h
@@ -106,7 +106,8 @@ class Port
     std::vector<sai_object_id_t> m_queue_ids;
     std::vector<sai_object_id_t> m_priority_group_ids;
     sai_port_priority_flow_control_mode_t m_pfc_asym = SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED;
-    uint8_t m_pfc_bitmask = 0;
+    uint8_t m_pfc_bitmask = 0;       // PFC enable bit mask
+    uint8_t m_pfcwd_sw_bitmask = 0;  // PFC software watchdog enable
     uint32_t m_nat_zone_id = 0;
     uint32_t m_vnid = VNID_NONE;
     uint32_t m_fdb_count = 0;
diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp
index a0cd7c2d55..d8294c8d8f 100755
--- a/orchagent/portsorch.cpp
+++ b/orchagent/portsorch.cpp
@@ -1040,6 +1040,43 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask)
     return true;
 }

+bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfcwd_bitmask)
+{
+    SWSS_LOG_ENTER();
+
+    Port p;
+
+    if (!getPort(portId, p))
+    {
+        SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId);
+        return false;
+    }
+
+    p.m_pfcwd_sw_bitmask = pfcwd_bitmask;
+
+    m_portList[p.m_alias] = p;
+
+    SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmask=0x%x", portId, pfcwd_bitmask);
+    return true;
+}
+
+bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfcwd_bitmask)
+{
+    SWSS_LOG_ENTER();
+
+    Port p;
+
+    if (!pfcwd_bitmask || !getPort(portId, p))
+    {
+        SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId);
+        return false;
+    }
+
+    *pfcwd_bitmask = p.m_pfcwd_sw_bitmask;
+
+    return true;
+}
+
 bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym)
 {
     SWSS_LOG_ENTER();
diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h
index 564e66d34a..152a762893 100755
--- a/orchagent/portsorch.h
+++ b/orchagent/portsorch.h
@@ -122,6 +122,9 @@ class PortsOrch : public Orch, public Subject
     bool getPortPfc(sai_object_id_t portId, uint8_t *pfc_bitmask);
     bool setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask);

+    bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask);
+    bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask);
+
     void generateQueueMap();
     void generatePriorityGroupMap();
     void generatePortCounterMap();
diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp
index 7b2e43e0dd..81f6c00cfe 100644
--- a/orchagent/qosorch.cpp
+++ b/orchagent/qosorch.cpp
@@ -1356,6 +1356,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer)
     string op = kfvOp(tuple);

     sai_uint8_t pfc_enable = 0;
+    sai_uint8_t pfcwd_sw_enable = 0;

     map<sai_port_attr_t, pair<string, sai_object_id_t>> update_list;
     for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++)
     {
@@ -1376,14 +1377,24 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer)
             update_list[qos_to_attr_map[map_type_name]] = make_pair(map_name, id);
         }

-        if (fvField(*it) == pfc_enable_name)
+        else if (fvField(*it) == pfc_enable_name || fvField(*it) == pfcwd_sw_enable_name)
         {
+            sai_uint8_t bitmask = 0;
             vector<string> queue_indexes;
             queue_indexes = tokenize(fvValue(*it), list_item_delimiter);
             for(string q_ind : queue_indexes)
             {
                 sai_uint8_t q_val = (uint8_t)stoi(q_ind);
-                pfc_enable |= (uint8_t)(1 << q_val);
+                bitmask |= (uint8_t)(1 << q_val);
+            }
+
+            if (fvField(*it) == pfc_enable_name)
+            {
+                pfc_enable = bitmask;
+            }
+            else
+            {
+                pfcwd_sw_enable = bitmask;
             }
         }
     }
@@ -1436,6 +1447,9 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer)

             SWSS_LOG_INFO("Applied PFC bits 0x%x to port %s", pfc_enable, port_name.c_str());
         }
+
+        // Save the PFC software watchdog bitmask unconditionally
+        gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, pfcwd_sw_enable);
     }

     SWSS_LOG_NOTICE("Applied QoS maps to ports");
diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h
index 37002be566..b5da05a68a 100644
--- a/orchagent/qosorch.h
+++ b/orchagent/qosorch.h
@@ -13,6 +13,7 @@ const string dot1p_to_tc_field_name = "dot1p_to_tc_map";
 const string pfc_to_pg_map_name = "pfc_to_pg_map";
 const string pfc_to_queue_map_name = "pfc_to_queue_map";
 const string pfc_enable_name = "pfc_enable";
+const string pfcwd_sw_enable_name = "pfcwd_sw_enable";
 const string tc_to_pg_map_field_name = "tc_to_pg_map";
 const string tc_to_queue_field_name = "tc_to_queue_map";
 const string scheduler_field_name = "scheduler";
diff --git a/tests/conftest.py b/tests/conftest.py
index 9b565cfa06..dafbc04309 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1101,6 +1101,16 @@ def getVlanOid(self, vlanId):
                 break
         return vlan_oid

+    def port_field_set(self, port, field, value):
+        cdb = swsscommon.DBConnector(4, self.redis_sock, 0)
+        tbl = swsscommon.Table(cdb, "PORT")
+        fvs = swsscommon.FieldValuePairs([(field, value)])
+        tbl.set(port, fvs)
+        time.sleep(1)
+
+    def port_admin_set(self, port, status):
+        self.port_field_set(port, "admin_status", status)
+
     # deps: acl_portchannel, fdb
     def getCrmCounterValue(self, key, counter):
         counters_db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, self.redis_sock, 0)
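
[Editor's note] After this patch a PORT_QOS_MAP entry carries two independent queue lists: pfc_enable (consumed by buffermgrd to create lossless BUFFER_PG entries) and pfcwd_sw_enable (consumed by qosorch to program the PortsOrch watchdog mask). Below is a C++ sketch mirroring the Python helper set_ports_pfc() in tests/test_pfcwd.py; the function name is illustrative only:

    #include <string>
    #include <utility>
    #include <vector>

    using FieldValueTuple = std::pair<std::string, std::string>;

    // Build the field-value pairs the tests write to CONFIG_DB PORT_QOS_MAP.
    static std::vector<FieldValueTuple> makePortQosMapEntry(const std::string &queues)
    {
        return {
            {"pfc_enable", queues},       // lossless PGs for buffermgrd
            {"pfcwd_sw_enable", queues},  // queues watched by the software PFC watchdog
        };
    }

    int main()
    {
        auto fvs = makePortQosMapEntry("3,4");
        return static_cast<int>(fvs.size()) - 2;  // exits 0
    }
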
diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py
index 44bea70620..ce3de0b577 100644
--- a/tests/test_buffer_traditional.py
+++ b/tests/test_buffer_traditional.py
@@ -3,7 +3,7 @@


 class TestBuffer(object):
-    LOSSLESS_PGS = [3, 4]
+    lossless_pgs = []
     INTF = "Ethernet0"

     def setup_db(self, dvs):
@@ -15,6 +15,10 @@ def setup_db(self, dvs):
         # enable PG watermark
         self.set_pg_wm_status('enable')

+    def get_pfc_enable_queues(self):
+        qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF)
+        return qos_map['pfc_enable'].split(',')
+
     def get_pg_oid(self, pg):
         fvs = dict()
         fvs = self.counter_db.get_entry("COUNTERS_PG_NAME_MAP", "")
@@ -51,19 +55,35 @@ def get_asic_buf_pg_profiles(self):
             buf_pg_entries = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg])
             self.buf_pg_profile[pg] = buf_pg_entries["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"]

-    def change_cable_len(self, cable_len):
+    def change_cable_len(self, cable_len, extra_port=None):
         fvs = dict()
         fvs[self.INTF] = cable_len
+        if extra_port:
+            fvs[extra_port] = cable_len
         self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs)

+    def set_port_qos_table(self, port, pfc_enable_flag):
+        fvs = dict()
+        fvs['pfc_enable'] = pfc_enable_flag
+        self.config_db.update_entry("PORT_QOS_MAP", port, fvs)
+        self.lossless_pgs = pfc_enable_flag.split(',')
+        # Replace "3,4" with the combination "3-4" to stay backward compatible
+        self.lossless_pg_combinations = pfc_enable_flag.replace('3,4', '3-4').split(',')
+
+    def get_pg_name_map(self):
+        pg_name_map = dict()
+        for pg in self.lossless_pgs:
+            pg_name = "{}:{}".format(self.INTF, pg)
+            pg_name_map[pg_name] = self.get_pg_oid(pg_name)
+        return pg_name_map
+
     @pytest.fixture
     def setup_teardown_test(self, dvs):
         try:
             self.setup_db(dvs)
-            pg_name_map = dict()
-            for pg in self.LOSSLESS_PGS:
-                pg_name = "{}:{}".format(self.INTF, pg)
-                pg_name_map[pg_name] = self.get_pg_oid(pg_name)
+            self.set_port_qos_table(self.INTF, '3,4')
+            self.lossless_pg_combinations = ['3-4']
+            pg_name_map = self.get_pg_name_map()
             yield pg_name_map
         finally:
             self.teardown()
@@ -119,7 +139,8 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test):
         self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile)

         # buffer pgs should still point to the original buffer profile
-        self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(orig_lossless_profile)})
+        for pg in self.lossless_pg_combinations:
+            self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(orig_lossless_profile)})
         fvs = dict()
         for pg in self.pg_name_map:
             fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg]
@@ -152,3 +173,79 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test):
             if orig_speed:
                 dvs.runcmd("config interface speed {} {}".format(self.INTF, orig_speed))
             dvs.runcmd("config interface shutdown {}".format(self.INTF))
+
+    # Verify that BUFFER_PG is not hardcoded to PGs 3-4:
+    # buffermgrd reads the 'pfc_enable' entry and applies the lossless profile to those queues
+    def test_buffer_pg_update(self, dvs, setup_teardown_test):
+        self.pg_name_map = setup_teardown_test
+        orig_cable_len = None
+        orig_speed = None
+        test_speed = None
+        extra_port = "Ethernet4"
+        try:
+            # Retrieve cable len
+            fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE")
+            orig_cable_len = fvs_cable_len[self.INTF]
+            if orig_cable_len == "0m":
+                cable_len_for_test = "300m"
+                fvs_cable_len[self.INTF] = cable_len_for_test
+                fvs_cable_len[extra_port] = cable_len_for_test
+
+                self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs_cable_len)
+            else:
+                cable_len_for_test = orig_cable_len
+            # Ethernet4 is brought up while no 'pfc_enable' is available; Ethernet0 is not supposed to be impacted
+            dvs.port_admin_set(extra_port, "up")
+
+            dvs.port_admin_set(self.INTF, "up")
+
+            # Retrieve port speed
+            fvs_port = self.config_db.get_entry("PORT", self.INTF)
+            orig_speed = fvs_port["speed"]
+
+            # Make sure the buffer PG has been created
+            orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test)
+            self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile)
+            self.orig_profiles = self.get_asic_buf_profile()
+
+            # get the orig buf profiles attached to the pgs
+            self.get_asic_buf_pg_profiles()
+
+            # Update port speed
+            if orig_speed == "100000":
+                test_speed = "40000"
+            elif orig_speed == "40000":
+                test_speed = "100000"
+            # change intf speed to 'test_speed'
+            dvs.port_field_set(self.INTF, "speed", test_speed)
+            dvs.port_field_set(extra_port, "speed", test_speed)
+
+            # Verify new profile is generated
+            new_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, cable_len_for_test)
+            self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile)
+
+            # Verify BUFFER_PG is updated
+            for pg in self.lossless_pg_combinations:
+                self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(new_lossless_profile)})
+
+            fvs_negative = {}
+            for pg in self.pg_name_map:
+                # verify that buffer pgs do not point to the old profile since we cannot deduce the new profile oid
+                fvs_negative["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg]
+                self.asic_db.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs_negative)
+
+            # Add pfc_enable field for extra port
+            self.set_port_qos_table(extra_port, '2,3,4,6')
+            self.lossless_pg_combinations = ['2-4', '6']
+            time.sleep(1)
+            # Verify BUFFER_PG is updated when pfc_enable is available
+            for pg in self.lossless_pg_combinations:
+                self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(new_lossless_profile)})
+        finally:
+            if orig_cable_len:
+                self.change_cable_len(orig_cable_len, extra_port)
+            if orig_speed:
+                dvs.port_field_set(self.INTF, "speed", orig_speed)
+                dvs.port_field_set(extra_port, "speed", orig_speed)
+            dvs.port_admin_set(self.INTF, "down")
+            dvs.port_admin_set(extra_port, "down")
diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py
index 78cd851574..249609aee2 100644
--- a/tests/test_pfcwd.py
+++ b/tests/test_pfcwd.py
@@ -103,7 +103,7 @@ def setup_test(self, dvs):
             # set cable len to non zero value. if port is down, default cable len is 0
             self.set_cable_len(port, "5m")
             # startup port
-            dvs.runcmd("config interface startup {}".format(port))
+            dvs.port_admin_set(port, "up")

         # enable pfcwd
         self.set_flex_counter_status("PFCWD", "enable")
@@ -120,7 +120,7 @@ def teardown_test(self, dvs):
             if self.orig_cable_len:
                 self.set_cable_len(port, self.orig_cable_len[port])
             # shutdown port
-            dvs.runcmd("config interface shutdown {}".format(port))
+            dvs.port_admin_set(port, "down")

     def get_db_handle(self, dvs):
         self.app_db = dvs.get_app_db()
@@ -148,9 +148,11 @@ def _get_bitmask(self, queues):
         return str(mask)

     def set_ports_pfc(self, status='enable', pfc_queues=[3,4]):
+        keyname = 'pfcwd_sw_enable'
         for port in self.test_ports:
             if 'enable' in status:
-                fvs = {'pfc_enable': ",".join([str(q) for q in pfc_queues])}
+                queues = ",".join([str(q) for q in pfc_queues])
+                fvs = {keyname: queues, 'pfc_enable': queues}
                 self.config_db.create_entry("PORT_QOS_MAP", port, fvs)
             else:
                 self.config_db.delete_entry("PORT_QOS_MAP", port)
@@ -212,7 +214,7 @@ def set_storm_state(self, queues, state="enabled"):
                 queue_name = port + ":" + str(queue)
                 self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs)

-    def test_pfcwd_single_queue(self, dvs, setup_teardown_test):
+    def test_pfcwd_software_single_queue(self, dvs, setup_teardown_test):
         try:
             # enable PFC on queues
             test_queues = [3, 4]
@@ -253,7 +255,7 @@ def test_pfcwd_software_single_queue(self, dvs, setup_teardown_test):
             self.reset_pfcwd_counters(storm_queue)
             self.stop_pfcwd_on_ports()

-    def test_pfcwd_multi_queue(self, dvs, setup_teardown_test):
+    def test_pfcwd_software_multi_queue(self, dvs, setup_teardown_test):
         try:
             # enable PFC on queues
             test_queues = [3, 4]
@@ -293,7 +295,6 @@ def test_pfcwd_software_multi_queue(self, dvs, setup_teardown_test):
             self.reset_pfcwd_counters(test_queues)
             self.stop_pfcwd_on_ports()

-#
 # Add Dummy always-pass test at end as workaroud
 # for issue when Flaky fail on final test it invokes module tear-down before retrying
 def test_nonflaky_dummy():
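
[Editor's note] The retry semantics introduced in doSpeedUpdateTask() encode an ordering contract between the PORT and PORT_QOS_MAP tables. A compilable sketch of that decision table follows; the classify() helper and the task_apply value are illustrative, not part of the patch:

    enum class task_process_status { task_success, task_need_retry, task_apply };

    // Mirrors the two early-return checks in BufferMgr::doSpeedUpdateTask():
    // no admin_status yet -> retry later; no pfc_enable yet -> drop the event,
    // since doPortQosTableTask() re-triggers the update once PORT_QOS_MAP arrives.
    static task_process_status classify(bool has_admin_status, bool has_pfc_enable)
    {
        if (!has_admin_status)
            return task_process_status::task_need_retry;
        if (!has_pfc_enable)
            return task_process_status::task_success;
        return task_process_status::task_apply;  // create profile and BUFFER_PG entries
    }

    int main()
    {
        return classify(true, true) == task_process_status::task_apply ? 0 : 1;
    }
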