From c2de7fc5a962043202900aca35ca081a0877db26 Mon Sep 17 00:00:00 2001
From: Stephen Sun <5379172+stephenxs@users.noreply.github.com>
Date: Fri, 1 Apr 2022 04:49:04 +0800
Subject: [PATCH] [QosOrch] The notifications cannot be drained in QosOrch in
 case the first one needs to retry (#2206)

What I did
Bugfix: the notifications cannot be drained in QosOrch when the first one needs to be retried.

Why I did it
Bugfix.

How I verified it
Mock test and manual test.

Details if related
Both doTask and the table handlers take the consumer as their argument. In doTask, each notification is iterated in the main loop and passed to a table handler. However, because only the consumer is passed, a handler always operates on the first entry in m_toSync and cannot access the notifications that follow it. If the first notification needs to be retried and therefore remains in m_toSync, the remaining notifications never get a chance to run.

Signed-off-by: Stephen Sun
---
 orchagent/qosorch.cpp           | 56 +++++++++-----------
 orchagent/qosorch.h             | 30 +++++------
 tests/mock_tests/qosorch_ut.cpp | 93 +++++++++++++++++++++++++++++++++
 3 files changed, 133 insertions(+), 46 deletions(-)

diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp
index d1a24cb5c9a8..616c08f1ae39 100644
--- a/orchagent/qosorch.cpp
+++ b/orchagent/qosorch.cpp
@@ -98,13 +98,11 @@ map<string, string> qos_to_ref_table_map = {
 #define DSCP_MAX_VAL 63
 #define EXP_MAX_VAL 7
 
-task_process_status QosMapHandler::processWorkItem(Consumer& consumer)
+task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
 
     sai_object_id_t sai_object = SAI_NULL_OBJECT_ID;
-    auto it = consumer.m_toSync.begin();
-    KeyOpFieldsValuesTuple tuple = it->second;
     string qos_object_name = kfvKey(tuple);
     string qos_map_type_name = consumer.getTableName();
     string op = kfvOp(tuple);
@@ -321,11 +319,11 @@ bool DscpToTcMapHandler::removeQosItem(sai_object_id_t sai_object)
     return true;
 }
 
-task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer)
+task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     DscpToTcMapHandler dscp_tc_handler;
-    return dscp_tc_handler.processWorkItem(consumer);
+    return dscp_tc_handler.processWorkItem(consumer, tuple);
 }
 
 bool MplsTcToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -376,11 +374,11 @@ sai_object_id_t MplsTcToTcMapHandler::addQosItem(const vector<sai_attribute_t> &
     return sai_object;
 }
 
-task_process_status QosOrch::handleMplsTcToTcTable(Consumer& consumer)
+task_process_status QosOrch::handleMplsTcToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     MplsTcToTcMapHandler mpls_tc_to_tc_handler;
-    return mpls_tc_to_tc_handler.processWorkItem(consumer);
+    return mpls_tc_to_tc_handler.processWorkItem(consumer, tuple);
 }
 
 bool Dot1pToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -445,11 +443,11 @@ sai_object_id_t Dot1pToTcMapHandler::addQosItem(const vector<sai_attribute_t> &a
     return object_id;
 }
 
-task_process_status QosOrch::handleDot1pToTcTable(Consumer &consumer)
+task_process_status QosOrch::handleDot1pToTcTable(Consumer &consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     Dot1pToTcMapHandler dot1p_tc_handler;
-    return dot1p_tc_handler.processWorkItem(consumer);
+    return dot1p_tc_handler.processWorkItem(consumer, tuple);
 }
 
 bool TcToQueueMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -498,11 +496,11 @@ sai_object_id_t TcToQueueMapHandler::addQosItem(const vector<sai_attribute_t> &a
     return sai_object;
 }
 
-task_process_status QosOrch::handleTcToQueueTable(Consumer& consumer)
+task_process_status QosOrch::handleTcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     TcToQueueMapHandler tc_queue_handler;
-    return tc_queue_handler.processWorkItem(consumer);
+    return tc_queue_handler.processWorkItem(consumer, tuple);
 }
 
 void WredMapHandler::freeAttribResources(vector<sai_attribute_t> &attributes)
@@ -719,11 +717,11 @@ bool WredMapHandler::removeQosItem(sai_object_id_t sai_object)
     return true;
 }
 
-task_process_status QosOrch::handleWredProfileTable(Consumer& consumer)
+task_process_status QosOrch::handleWredProfileTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     WredMapHandler wred_handler;
-    return wred_handler.processWorkItem(consumer);
+    return wred_handler.processWorkItem(consumer, tuple);
 }
 
 bool TcToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -772,11 +770,11 @@ sai_object_id_t TcToPgHandler::addQosItem(const vector<sai_attribute_t> &attribu
 
 }
 
-task_process_status QosOrch::handleTcToPgTable(Consumer& consumer)
+task_process_status QosOrch::handleTcToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     TcToPgHandler tc_to_pg_handler;
-    return tc_to_pg_handler.processWorkItem(consumer);
+    return tc_to_pg_handler.processWorkItem(consumer, tuple);
 }
 
 bool PfcPrioToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -826,11 +824,11 @@ sai_object_id_t PfcPrioToPgHandler::addQosItem(const vector<sai_attribute_t> &at
 
 }
 
-task_process_status QosOrch::handlePfcPrioToPgTable(Consumer& consumer)
+task_process_status QosOrch::handlePfcPrioToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     PfcPrioToPgHandler pfc_prio_to_pg_handler;
-    return pfc_prio_to_pg_handler.processWorkItem(consumer);
+    return pfc_prio_to_pg_handler.processWorkItem(consumer, tuple);
 }
 
 bool PfcToQueueHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes)
@@ -967,11 +965,11 @@ sai_object_id_t DscpToFcMapHandler::addQosItem(const vector<sai_attribute_t> &at
     return sai_object;
 }
 
-task_process_status QosOrch::handleDscpToFcTable(Consumer& consumer)
+task_process_status QosOrch::handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     DscpToFcMapHandler dscp_fc_handler;
-    return dscp_fc_handler.processWorkItem(consumer);
+    return dscp_fc_handler.processWorkItem(consumer, tuple);
 }
 
 bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple,
@@ -1058,18 +1056,18 @@ sai_object_id_t ExpToFcMapHandler::addQosItem(const vector<sai_attribute_t> &att
     return sai_object;
 }
 
-task_process_status QosOrch::handleExpToFcTable(Consumer& consumer)
+task_process_status QosOrch::handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     ExpToFcMapHandler exp_fc_handler;
-    return exp_fc_handler.processWorkItem(consumer);
+    return exp_fc_handler.processWorkItem(consumer, tuple);
 }
 
-task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer)
+task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
     PfcToQueueHandler pfc_to_queue_handler;
-    return pfc_to_queue_handler.processWorkItem(consumer);
+    return pfc_to_queue_handler.processWorkItem(consumer, tuple);
 }
 
 QosOrch::QosOrch(DBConnector *db, vector<string> &tableNames) : Orch(db, tableNames)
@@ -1104,14 +1102,13 @@ void QosOrch::initTableHandlers()
     m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, &QosOrch::handlePfcToQueueTable));
 }
 
-task_process_status QosOrch::handleSchedulerTable(Consumer& consumer)
+task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
 
     sai_status_t sai_status;
     sai_object_id_t sai_object = SAI_NULL_OBJECT_ID;
 
-    KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second;
     string qos_map_type_name = CFG_SCHEDULER_TABLE_NAME;
     string qos_object_name = kfvKey(tuple);
     string op = kfvOp(tuple);
@@ -1457,11 +1454,9 @@ bool QosOrch::applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_i
     return true;
 }
 
-task_process_status QosOrch::handleQueueTable(Consumer& consumer)
+task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
-    auto it = consumer.m_toSync.begin();
-    KeyOpFieldsValuesTuple tuple = it->second;
     Port port;
     bool result;
     string key = kfvKey(tuple);
@@ -1690,11 +1685,10 @@ task_process_status QosOrch::ResolveMapAndApplyToPort(
     return task_process_status::task_success;
 }
 
-task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer)
+task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
 
-    KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second;
     string key = kfvKey(tuple);
     string op = kfvOp(tuple);
     vector<string> port_names = tokenize(key, list_item_delimiter);
@@ -1898,7 +1892,7 @@ void QosOrch::doTask(Consumer &consumer)
             continue;
         }
 
-        auto task_status = (this->*(m_qos_handler_map[qos_map_type_name]))(consumer);
+        auto task_status = (this->*(m_qos_handler_map[qos_map_type_name]))(consumer, it->second);
         switch(task_status)
         {
             case task_process_status::task_success :
diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h
index 613bc7437ede..0401cc6e6f50 100644
--- a/orchagent/qosorch.h
+++ b/orchagent/qosorch.h
@@ -59,7 +59,7 @@ const string ecn_all = "ecn_all";
 class QosMapHandler
 {
 public:
-    task_process_status processWorkItem(Consumer& consumer);
+    task_process_status processWorkItem(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
     virtual bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes) = 0;
     virtual void freeAttribResources(vector<sai_attribute_t> &attributes);
     virtual bool modifyQosItem(sai_object_id_t, vector<sai_attribute_t> &attributes);
@@ -158,25 +158,25 @@ class QosOrch : public Orch
     void doTask() override;
     virtual void doTask(Consumer& consumer);
 
-    typedef task_process_status (QosOrch::*qos_table_handler)(Consumer& consumer);
+    typedef task_process_status (QosOrch::*qos_table_handler)(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
     typedef map<string, qos_table_handler> qos_table_handler_map;
     typedef pair<string, qos_table_handler> qos_handler_pair;
 
     void initTableHandlers();
 
-    task_process_status handleDscpToTcTable(Consumer& consumer);
-    task_process_status handleMplsTcToTcTable(Consumer& consumer);
-    task_process_status handleDot1pToTcTable(Consumer& consumer);
-    task_process_status handlePfcPrioToPgTable(Consumer& consumer);
-    task_process_status handlePfcToQueueTable(Consumer& consumer);
-    task_process_status handlePortQosMapTable(Consumer& consumer);
-    task_process_status handleTcToPgTable(Consumer& consumer);
-    task_process_status handleTcToQueueTable(Consumer& consumer);
-    task_process_status handleSchedulerTable(Consumer& consumer);
-    task_process_status handleQueueTable(Consumer& consumer);
-    task_process_status handleWredProfileTable(Consumer& consumer);
-    task_process_status handleDscpToFcTable(Consumer& consumer);
-    task_process_status handleExpToFcTable(Consumer& consumer);
+    task_process_status handleDscpToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleMplsTcToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleDot1pToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handlePfcPrioToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handlePfcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleTcToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleTcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleSchedulerTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleWredProfileTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
+    task_process_status handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
 
     sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id);
 
diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp
index a77d19b38bf0..0644058a469b 100644
--- a/tests/mock_tests/qosorch_ut.cpp
+++ b/tests/mock_tests/qosorch_ut.cpp
@@ -786,4 +786,97 @@ namespace qosorch_test
         static_cast<Orch *>(gQosOrch)->doTask();
         ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id);
     }
+
+    TEST_F(QosOrchTest, QosOrchTestRetryFirstItem)
+    {
+        // There was a bug in QosOrch where the 2nd and subsequent notifications could not be handled if the 1st one needed to be retried
+        // This is to verify the bug has been fixed
+        vector<string> ts;
+        std::deque<KeyOpFieldsValuesTuple> entries;
+
+        // Try adding dscp_to_tc_map AZURE.1 and AZURE to PORT_QOS_MAP table
+        // The object AZURE.1 does not exist so the first item can not be handled and remains in m_toSync.
+        entries.push_back({"Ethernet0", "SET",
+                           {
+                               {"dscp_to_tc_map", "AZURE.1"}
+                           }});
+        entries.push_back({"Ethernet4", "SET",
+                           {
+                               {"dscp_to_tc_map", "AZURE"}
+                           }});
+        auto portQosMapConsumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME));
+        portQosMapConsumer->addToSync(entries);
+        entries.clear();
+        static_cast<Orch *>(gQosOrch)->doTask();
+        // The 2nd notification should be handled. Make sure by checking reference
+        CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet4", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE");
+        // Make sure there is one item left
+        portQosMapConsumer->dumpPendingTasks(ts);
+        ASSERT_EQ(ts[0], "PORT_QOS_MAP|Ethernet0|SET|dscp_to_tc_map:AZURE.1");
+        ASSERT_EQ(ts.size(), 1);
+        ts.clear();
+
+        // Try adding scheduler.0 and scheduler.2 to QUEUE table
+        entries.push_back({"Ethernet0|0", "SET",
+                           {
+                               {"scheduler", "scheduler.2"}
+                           }});
+        entries.push_back({"Ethernet0|1", "SET",
+                           {
+                               {"scheduler", "scheduler.0"}
+                           }});
+        auto queueConsumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME));
+        queueConsumer->addToSync(entries);
+        entries.clear();
+        static_cast<Orch *>(gQosOrch)->doTask();
+        // The 2nd notification should be handled. Make sure by checking reference
+        CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|1", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0");
+        // Make sure there is one item left
+        queueConsumer->dumpPendingTasks(ts);
+        ASSERT_EQ(ts[0], "QUEUE|Ethernet0|0|SET|scheduler:scheduler.2");
+        ASSERT_EQ(ts.size(), 1);
+        ts.clear();
+
+        // Try removing AZURE and adding AZURE.1 to DSCP_TO_TC_MAP table
+        entries.push_back({"AZURE", "DEL", {{}}});
+        entries.push_back({"AZURE.1", "SET",
+                           {
+                               {"1", "1"}
+                           }});
+        auto consumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME));
+        consumer->addToSync(entries);
+        entries.clear();
+        static_cast<Orch *>(gQosOrch)->doTask();
+        // The 2nd notification should be handled. Make sure by checking reference
+        CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE.1");
+        // The pending item in PORT_QOS_MAP table should also be handled since the dependency is met
+        portQosMapConsumer->dumpPendingTasks(ts);
+        ASSERT_TRUE(ts.empty());
+        consumer->dumpPendingTasks(ts);
+        ASSERT_EQ(ts[0], "DSCP_TO_TC_MAP|AZURE|DEL|:");
+        ASSERT_EQ(ts.size(), 1);
+        ts.clear();
+
+        entries.push_back({"scheduler.0", "DEL", {{}}});
+        entries.push_back({"scheduler.2", "SET",
+                           {
+                               {"type", "DWRR"},
+                               {"weight", "15"}
+                           }});
+        consumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME));
+        consumer->addToSync(entries);
+        entries.clear();
+        static_cast<Orch *>(gQosOrch)->doTask();
+        // We need a second call to "doTask" because scheduler table is handled after queue table
+        static_cast<Orch *>(gQosOrch)->doTask();
+        // The 2nd notification should be handled. Make sure by checking reference
+        CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.2");
+        // The pending item in QUEUE table should also be handled since the dependency is met
+        queueConsumer->dumpPendingTasks(ts);
+        ASSERT_TRUE(ts.empty());
+        consumer->dumpPendingTasks(ts);
+        ASSERT_EQ(ts[0], "SCHEDULER|scheduler.0|DEL|:");
+        ASSERT_EQ(ts.size(), 1);
+        ts.clear();
+    }
 }
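
For context, the sketch below illustrates the drain pattern that the new handler signature enables: a doTask-style loop can walk every pending tuple, hand each one to the handler, and keep only the entries whose handler asked for a retry, so a stuck first entry no longer blocks the ones behind it. This is a minimal, self-contained approximation in plain C++; the type aliases and the handle_entry function are illustrative stand-ins, not the actual swss/orchagent classes or APIs.

#include <iostream>
#include <map>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

// Stand-ins for the swss types, for illustration only.
using FieldValueTuple = std::pair<std::string, std::string>;
using KeyOpFieldsValuesTuple = std::tuple<std::string, std::string, std::vector<FieldValueTuple>>;

enum class task_process_status { task_success, task_invalid_entry, task_need_retry };

// Hypothetical handler: pretends the entry keyed "needs-dependency" cannot be
// resolved yet and must be retried, while every other entry succeeds.
task_process_status handle_entry(const KeyOpFieldsValuesTuple &tuple)
{
    return std::get<0>(tuple) == "needs-dependency"
               ? task_process_status::task_need_retry
               : task_process_status::task_success;
}

int main()
{
    // Analogue of Consumer::m_toSync: pending notifications keyed by object key.
    std::map<std::string, KeyOpFieldsValuesTuple> to_sync;
    to_sync.emplace("needs-dependency",
                    std::make_tuple(std::string("needs-dependency"), std::string("SET"),
                                    std::vector<FieldValueTuple>{}));
    to_sync.emplace("ready",
                    std::make_tuple(std::string("ready"), std::string("SET"),
                                    std::vector<FieldValueTuple>{}));

    // Drain loop: every entry is visited once per round; an entry that needs a
    // retry stays in the map, but it no longer blocks the entries after it.
    for (auto it = to_sync.begin(); it != to_sync.end(); )
    {
        switch (handle_entry(it->second))
        {
        case task_process_status::task_need_retry:
            ++it;                       // keep it for the next round
            break;
        default:
            it = to_sync.erase(it);     // handled (or dropped), remove it
            break;
        }
    }

    std::cout << "pending after one round: " << to_sync.size() << "\n";  // prints 1
    return 0;
}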