From 6d660796a580fa398028aa8224b8f29eab58d82f Mon Sep 17 00:00:00 2001 From: Shi Su <67605788+shi-su@users.noreply.github.com> Date: Wed, 26 Jan 2022 14:37:29 -0800 Subject: [PATCH] [202012] [vnetorch] Add ECMP support for vnet tunnel routes with endpoint health monitoring (#2104) What I did Cherry-pick changes in #1960, #1883, #1955, #2058 Changes in #1960: Add functions to create/remove next hop groups for vnet tunnel routes. Count the reference count of next hop groups to create and remove as needed. Share the counter of next hop groups with routeorch. Add vs test Changes in #1883: Implement bfdorch to program hardware BFD sessions via bfd SAI. Add vs test for bfd sessions. Changes in #1955: Add functions to create/remove next hop groups for vnet tunnel routes. Count the reference count of next hop groups to create and remove as needed. Share the counter of next hop groups with routeorch. Adapt route endpoint according to the BFD state of endpoints. Changes in #2058: Advertise active vnet tunnel routes. Why I did it To add support for overlay ECMP with endpoint health monitoring. 
--- orchagent/Makefile.am | 3 +- orchagent/bfdorch.cpp | 442 ++++++++++++++++ orchagent/bfdorch.h | 37 ++ orchagent/main.cpp | 4 + orchagent/nexthopkey.h | 2 + orchagent/notifications.cpp | 6 + orchagent/notifications.h | 1 + orchagent/observer.h | 1 + orchagent/orchdaemon.cpp | 5 +- orchagent/orchdaemon.h | 1 + orchagent/routeorch.cpp | 14 + orchagent/routeorch.h | 4 + orchagent/saihelper.cpp | 3 + orchagent/vnetorch.cpp | 879 ++++++++++++++++++++++++++++++-- orchagent/vnetorch.h | 110 +++- orchagent/vxlanorch.h | 4 + tests/mock_tests/Makefile.am | 3 +- tests/test_bfd.py | 466 +++++++++++++++++ tests/test_vnet.py | 964 ++++++++++++++++++++++++++++++++++- 19 files changed, 2876 insertions(+), 73 deletions(-) create mode 100644 orchagent/bfdorch.cpp create mode 100644 orchagent/bfdorch.h create mode 100644 tests/test_bfd.py diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index 59307ddfeb..4b79fac869 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -63,7 +63,8 @@ orchagent_SOURCES = \ chassisorch.cpp \ debugcounterorch.cpp \ natorch.cpp \ - muxorch.cpp + muxorch.cpp \ + bfdorch.cpp orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp orchagent_SOURCES += debug_counter/debug_counter.cpp debug_counter/drop_counter.cpp diff --git a/orchagent/bfdorch.cpp b/orchagent/bfdorch.cpp new file mode 100644 index 0000000000..68295842b3 --- /dev/null +++ b/orchagent/bfdorch.cpp @@ -0,0 +1,442 @@ +#include "bfdorch.h" +#include "intfsorch.h" +#include "vrforch.h" +#include "converter.h" +#include "swssnet.h" +#include "notifier.h" +#include "sai_serialize.h" +#include "directory.h" + +using namespace std; +using namespace swss; + +#define BFD_SESSION_DEFAULT_TX_INTERVAL 1000 +#define BFD_SESSION_DEFAULT_RX_INTERVAL 1000 +#define BFD_SESSION_DEFAULT_DETECT_MULTIPLIER 3 +#define BFD_SESSION_MILLISECOND_TO_MICROSECOND 1000 +#define BFD_SRCPORTINIT 49152 +#define BFD_SRCPORTMAX 65536 + +extern sai_bfd_api_t* 
sai_bfd_api; +extern sai_object_id_t gSwitchId; +extern sai_object_id_t gVirtualRouterId; +extern PortsOrch* gPortsOrch; +extern Directory gDirectory; + +const map session_type_map = +{ + {"demand_active", SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE}, + {"demand_passive", SAI_BFD_SESSION_TYPE_DEMAND_PASSIVE}, + {"async_active", SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE}, + {"async_passive", SAI_BFD_SESSION_TYPE_ASYNC_PASSIVE} +}; + +const map session_type_lookup = +{ + {SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE, "demand_active"}, + {SAI_BFD_SESSION_TYPE_DEMAND_PASSIVE, "demand_passive"}, + {SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE, "async_active"}, + {SAI_BFD_SESSION_TYPE_ASYNC_PASSIVE, "async_passive"} +}; + +const map session_state_lookup = +{ + {SAI_BFD_SESSION_STATE_ADMIN_DOWN, "Admin_Down"}, + {SAI_BFD_SESSION_STATE_DOWN, "Down"}, + {SAI_BFD_SESSION_STATE_INIT, "Init"}, + {SAI_BFD_SESSION_STATE_UP, "Up"} +}; + +BfdOrch::BfdOrch(DBConnector *db, string tableName, TableConnector stateDbBfdSessionTable): + Orch(db, tableName), + m_stateBfdSessionTable(stateDbBfdSessionTable.first, stateDbBfdSessionTable.second) +{ + SWSS_LOG_ENTER(); + + DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); + m_bfdStateNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + auto bfdStateNotificatier = new Notifier(m_bfdStateNotificationConsumer, this, "BFD_STATE_NOTIFICATIONS"); + Orch::addExecutor(bfdStateNotificatier); +} + +BfdOrch::~BfdOrch(void) +{ + SWSS_LOG_ENTER(); +} + +void BfdOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + auto data = kfvFieldsValues(t); + + if (op == SET_COMMAND) + { + if (!create_bfd_session(key, data)) + { + it++; + continue; + } + } + else if (op == DEL_COMMAND) + { + if (!remove_bfd_session(key)) + { + it++; + continue; + } + } + else + { + 
SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); + } + + it = consumer.m_toSync.erase(it); + } +} + +void BfdOrch::doTask(NotificationConsumer &consumer) +{ + SWSS_LOG_ENTER(); + + std::string op; + std::string data; + std::vector values; + + consumer.pop(op, data, values); + + if (&consumer != m_bfdStateNotificationConsumer) + { + return; + } + + if (op == "bfd_session_state_change") + { + uint32_t count; + sai_bfd_session_state_notification_t *bfdSessionState = nullptr; + + sai_deserialize_bfd_session_state_ntf(data, count, &bfdSessionState); + + for (uint32_t i = 0; i < count; i++) + { + sai_object_id_t id = bfdSessionState[i].bfd_session_id; + sai_bfd_session_state_t state = bfdSessionState[i].session_state; + + SWSS_LOG_INFO("Get BFD session state change notification id:%" PRIx64 " state: %s", id, session_state_lookup.at(state).c_str()); + + if (state != bfd_session_lookup[id].state) + { + auto key = bfd_session_lookup[id].peer; + m_stateBfdSessionTable.hset(key, "state", session_state_lookup.at(state)); + + SWSS_LOG_NOTICE("BFD session state for %s changed from %s to %s", key.c_str(), + session_state_lookup.at(bfd_session_lookup[id].state).c_str(), session_state_lookup.at(state).c_str()); + + BfdUpdate update; + update.peer = key; + update.state = state; + notify(SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE, static_cast(&update)); + + bfd_session_lookup[id].state = state; + } + } + + sai_deserialize_free_bfd_session_state_ntf(count, bfdSessionState); + } +} + +bool BfdOrch::create_bfd_session(const string& key, const vector& data) +{ + if (bfd_session_map.find(key) != bfd_session_map.end()) + { + SWSS_LOG_ERROR("BFD session for %s already exists", key.c_str()); + return true; + } + + size_t found_vrf = key.find(delimiter); + if (found_vrf == string::npos) + { + SWSS_LOG_ERROR("Failed to parse key %s, no vrf is given", key.c_str()); + return true; + } + + size_t found_ifname = key.find(delimiter, found_vrf + 1); + if (found_ifname == string::npos) + { + 
SWSS_LOG_ERROR("Failed to parse key %s, no ifname is given", key.c_str()); + return true; + } + + string vrf_name = key.substr(0, found_vrf); + string alias = key.substr(found_vrf + 1, found_ifname - found_vrf - 1); + IpAddress peer_address(key.substr(found_ifname + 1)); + + sai_bfd_session_type_t bfd_session_type = SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE; + sai_bfd_encapsulation_type_t encapsulation_type = SAI_BFD_ENCAPSULATION_TYPE_NONE; + IpAddress src_ip; + uint32_t tx_interval = BFD_SESSION_DEFAULT_TX_INTERVAL; + uint32_t rx_interval = BFD_SESSION_DEFAULT_RX_INTERVAL; + uint8_t multiplier = BFD_SESSION_DEFAULT_DETECT_MULTIPLIER; + bool multihop = false; + MacAddress dst_mac; + bool dst_mac_provided = false; + bool src_ip_provided = false; + + sai_attribute_t attr; + vector attrs; + vector fvVector; + + for (auto i : data) + { + auto value = fvValue(i); + + if (fvField(i) == "tx_interval") + { + tx_interval = to_uint(value); + } + else if (fvField(i) == "rx_interval") + { + rx_interval = to_uint(value); + } + else if (fvField(i) == "multiplier") + { + multiplier = to_uint(value); + } + else if (fvField(i) == "multihop") + { + multihop = (value == "true") ? 
true : false; + } + else if (fvField(i) == "local_addr") + { + src_ip = IpAddress(value); + src_ip_provided = true; + } + else if (fvField(i) == "type") + { + if (session_type_map.find(value) == session_type_map.end()) + { + SWSS_LOG_ERROR("Invalid BFD session type %s\n", value.c_str()); + continue; + } + bfd_session_type = session_type_map.at(value); + } + else if (fvField(i) == "dst_mac") + { + dst_mac = MacAddress(value); + dst_mac_provided = true; + } + else + SWSS_LOG_ERROR("Unsupported BFD attribute %s\n", fvField(i).c_str()); + } + + if (!src_ip_provided) + { + SWSS_LOG_ERROR("Failed to create BFD session %s because source IP is not provided", key.c_str()); + return true; + } + + attr.id = SAI_BFD_SESSION_ATTR_TYPE; + attr.value.s32 = bfd_session_type; + attrs.emplace_back(attr); + fvVector.emplace_back("type", session_type_lookup.at(bfd_session_type)); + + attr.id = SAI_BFD_SESSION_ATTR_LOCAL_DISCRIMINATOR; + attr.value.u32 = bfd_gen_id(); + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_UDP_SRC_PORT; + attr.value.u32 = bfd_src_port(); + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_REMOTE_DISCRIMINATOR; + attr.value.u32 = 0; + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_BFD_ENCAPSULATION_TYPE; + attr.value.s32 = encapsulation_type; + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_IPHDR_VERSION; + attr.value.u8 = src_ip.isV4() ? 
4 : 6; + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS; + copy(attr.value.ipaddr, src_ip); + attrs.emplace_back(attr); + fvVector.emplace_back("local_addr", src_ip.to_string()); + + attr.id = SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS; + copy(attr.value.ipaddr, peer_address); + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_MIN_TX; + attr.value.u32 = tx_interval * BFD_SESSION_MILLISECOND_TO_MICROSECOND; + attrs.emplace_back(attr); + fvVector.emplace_back("tx_interval", to_string(tx_interval)); + + attr.id = SAI_BFD_SESSION_ATTR_MIN_RX; + attr.value.u32 = rx_interval * BFD_SESSION_MILLISECOND_TO_MICROSECOND; + attrs.emplace_back(attr); + fvVector.emplace_back("rx_interval", to_string(rx_interval)); + + attr.id = SAI_BFD_SESSION_ATTR_MULTIPLIER; + attr.value.u8 = multiplier; + attrs.emplace_back(attr); + fvVector.emplace_back("multiplier", to_string(multiplier)); + + if (multihop) + { + attr.id = SAI_BFD_SESSION_ATTR_MULTIHOP; + attr.value.booldata = true; + attrs.emplace_back(attr); + fvVector.emplace_back("multihop", "true"); + } + else + { + fvVector.emplace_back("multihop", "false"); + } + + if (alias != "default") + { + Port port; + if (!gPortsOrch->getPort(alias, port)) + { + SWSS_LOG_ERROR("Failed to locate port %s", alias.c_str()); + return false; + } + + if (!dst_mac_provided) + { + SWSS_LOG_ERROR("Failed to create BFD session %s: destination MAC address required when hardware lookup not valid", + key.c_str()); + return true; + } + + if (vrf_name != "default") + { + SWSS_LOG_ERROR("Failed to create BFD session %s: vrf is not supported when hardware lookup not valid", + key.c_str()); + return true; + } + + attr.id = SAI_BFD_SESSION_ATTR_HW_LOOKUP_VALID; + attr.value.booldata = false; + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_PORT; + attr.value.oid = port.m_port_id; + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_SRC_MAC_ADDRESS; + memcpy(attr.value.mac, port.m_mac.getMac(), 
sizeof(sai_mac_t)); + attrs.emplace_back(attr); + + attr.id = SAI_BFD_SESSION_ATTR_DST_MAC_ADDRESS; + memcpy(attr.value.mac, dst_mac.getMac(), sizeof(sai_mac_t)); + attrs.emplace_back(attr); + } + else + { + if (dst_mac_provided) + { + SWSS_LOG_ERROR("Failed to create BFD session %s: destination MAC address not supported when hardware lookup valid", + key.c_str()); + return true; + } + + attr.id = SAI_BFD_SESSION_ATTR_VIRTUAL_ROUTER; + if (vrf_name == "default") + { + attr.value.oid = gVirtualRouterId; + } + else + { + VRFOrch* vrf_orch = gDirectory.get(); + attr.value.oid = vrf_orch->getVRFid(vrf_name); + } + + attrs.emplace_back(attr); + } + + fvVector.emplace_back("state", session_state_lookup.at(SAI_BFD_SESSION_STATE_DOWN)); + + sai_object_id_t bfd_session_id = SAI_NULL_OBJECT_ID; + sai_status_t status = sai_bfd_api->create_bfd_session(&bfd_session_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create bfd session %s, rv:%d", key.c_str(), status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_BFD, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + const string state_db_key = get_state_db_key(vrf_name, alias, peer_address); + m_stateBfdSessionTable.set(state_db_key, fvVector); + bfd_session_map[key] = bfd_session_id; + bfd_session_lookup[bfd_session_id] = {state_db_key, SAI_BFD_SESSION_STATE_DOWN}; + + BfdUpdate update; + update.peer = state_db_key; + update.state = SAI_BFD_SESSION_STATE_DOWN; + notify(SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE, static_cast(&update)); + + return true; +} + +bool BfdOrch::remove_bfd_session(const string& key) +{ + if (bfd_session_map.find(key) == bfd_session_map.end()) + { + SWSS_LOG_ERROR("BFD session for %s does not exist", key.c_str()); + return true; + } + + sai_object_id_t bfd_session_id = bfd_session_map[key]; + sai_status_t status = 
sai_bfd_api->remove_bfd_session(bfd_session_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove bfd session %s, rv:%d", key.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BFD, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + m_stateBfdSessionTable.del(bfd_session_lookup[bfd_session_id].peer); + bfd_session_map.erase(key); + bfd_session_lookup.erase(bfd_session_id); + + return true; +} + +string BfdOrch::get_state_db_key(const string& vrf_name, const string& alias, const IpAddress& peer_address) +{ + return vrf_name + state_db_key_delimiter + alias + state_db_key_delimiter + peer_address.to_string(); +} + +uint32_t BfdOrch::bfd_gen_id(void) +{ + static uint32_t session_id = 1; + return (session_id++); +} + +uint32_t BfdOrch::bfd_src_port(void) +{ + static uint32_t port = BFD_SRCPORTINIT; + if (port >= BFD_SRCPORTMAX) + { + port = BFD_SRCPORTINIT; + } + + return (port++); +} diff --git a/orchagent/bfdorch.h b/orchagent/bfdorch.h new file mode 100644 index 0000000000..6be1f8deae --- /dev/null +++ b/orchagent/bfdorch.h @@ -0,0 +1,37 @@ +#ifndef SWSS_BFDORCH_H +#define SWSS_BFDORCH_H + +#include "orch.h" +#include "observer.h" + +struct BfdUpdate +{ + std::string peer; + sai_bfd_session_state_t state; +}; + +class BfdOrch: public Orch, public Subject +{ +public: + void doTask(Consumer &consumer); + void doTask(swss::NotificationConsumer &consumer); + BfdOrch(swss::DBConnector *db, std::string tableName, TableConnector stateDbBfdSessionTable); + virtual ~BfdOrch(void); + +private: + bool create_bfd_session(const std::string& key, const std::vector& data); + bool remove_bfd_session(const std::string& key); + std::string get_state_db_key(const std::string& vrf_name, const std::string& alias, const swss::IpAddress& peer_address); + + uint32_t bfd_gen_id(void); + uint32_t bfd_src_port(void); + + std::map bfd_session_map; + std::map 
bfd_session_lookup; + + swss::Table m_stateBfdSessionTable; + + swss::NotificationConsumer* m_bfdStateNotificationConsumer; +}; + +#endif /* SWSS_BFDORCH_H */ diff --git a/orchagent/main.cpp b/orchagent/main.cpp index a4f30a9f2d..d12f460d20 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -297,6 +297,10 @@ int main(int argc, char **argv) attr.value.ptr = (void *)on_port_state_change; attrs.push_back(attr); + attr.id = SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY; + attr.value.ptr = (void *)on_bfd_session_state_change; + attrs.push_back(attr); + attr.id = SAI_SWITCH_ATTR_SHUTDOWN_REQUEST_NOTIFY; attr.value.ptr = (void *)on_switch_shutdown_request; attrs.push_back(attr); diff --git a/orchagent/nexthopkey.h b/orchagent/nexthopkey.h index 69a94505ae..a56c85607e 100644 --- a/orchagent/nexthopkey.h +++ b/orchagent/nexthopkey.h @@ -68,6 +68,8 @@ struct NextHopKey mac_address = keys[3]; } + NextHopKey(const IpAddress &ip, const MacAddress &mac, const uint32_t &vni, bool overlay_nh) : ip_address(ip), alias(""), vni(vni), mac_address(mac){} + const std::string to_string() const { return ip_address.to_string() + NH_DELIMITER + alias; diff --git a/orchagent/notifications.cpp b/orchagent/notifications.cpp index 209c03d83b..1a49526370 100644 --- a/orchagent/notifications.cpp +++ b/orchagent/notifications.cpp @@ -17,6 +17,12 @@ void on_port_state_change(uint32_t count, sai_port_oper_status_notification_t *d // which causes concurrency access to the DB } +void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data) +{ + // don't use this event handler, because it runs by libsairedis in a separate thread + // which causes concurrency access to the DB +} + void on_switch_shutdown_request() { SWSS_LOG_ENTER(); diff --git a/orchagent/notifications.h b/orchagent/notifications.h index 2ee207ebfa..ea22593a1f 100644 --- a/orchagent/notifications.h +++ b/orchagent/notifications.h @@ -6,4 +6,5 @@ extern "C" { void on_fdb_event(uint32_t count, 
sai_fdb_event_notification_data_t *data); void on_port_state_change(uint32_t count, sai_port_oper_status_notification_t *data); +void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data); void on_switch_shutdown_request(); diff --git a/orchagent/observer.h b/orchagent/observer.h index 76f00f1bfd..10ce7e2509 100644 --- a/orchagent/observer.h +++ b/orchagent/observer.h @@ -18,6 +18,7 @@ enum SubjectType SUBJECT_TYPE_PORT_CHANGE, SUBJECT_TYPE_PORT_OPER_STATE_CHANGE, SUBJECT_TYPE_FDB_FLUSH_CHANGE, + SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE }; class Observer diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index a4155e53f6..190f4e3f25 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -38,6 +38,7 @@ BufferOrch *gBufferOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; NatOrch *gNatOrch; +BfdOrch *gBfdOrch; bool gIsNatSupported = false; @@ -106,6 +107,8 @@ bool OrchDaemon::init() gPortsOrch = new PortsOrch(m_applDb, ports_tables); TableConnector stateDbFdb(m_stateDb, STATE_FDB_TABLE_NAME); gFdbOrch = new FdbOrch(m_applDb, app_fdb_tables, stateDbFdb, gPortsOrch); + TableConnector stateDbBfdSessionTable(m_stateDb, STATE_BFD_SESSION_TABLE_NAME); + gBfdOrch = new BfdOrch(m_applDb, APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); vector vnet_tables = { APP_VNET_RT_TABLE_NAME, @@ -271,7 +274,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. 
* For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gIntfsOrch, gNeighOrch, gRouteOrch, copp_orch, qos_orch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, debug_counter_orch, mux_orch, mux_cb_orch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gIntfsOrch, gNeighOrch, gRouteOrch, copp_orch, qos_orch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, debug_counter_orch, mux_orch, mux_cb_orch, gBfdOrch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index 1215958a90..725c94be85 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -32,6 +32,7 @@ #include "directory.h" #include "natorch.h" #include "muxorch.h" +#include "bfdorch.h" using namespace swss; diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 02de369e80..f0e1515579 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -2186,3 +2186,17 @@ bool RouteOrch::removeOverlayNextHops(sai_object_id_t vrf_id, const NextHopGroup return true; } +void RouteOrch::increaseNextHopGroupCount() +{ + m_nextHopGroupCount ++; +} + +void RouteOrch::decreaseNextHopGroupCount() +{ + m_nextHopGroupCount --; +} + +bool RouteOrch::checkNextHopGroupCount() +{ + return m_nextHopGroupCount < m_maxNextHopGroupCount; +} diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 3370df5a53..cd996bbc58 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -137,6 +137,10 @@ class RouteOrch : public Orch, public Subject bool createFineGrainedNextHopGroup(sai_object_id_t &next_hop_group_id, vector &nhg_attrs); bool removeFineGrainedNextHopGroup(sai_object_id_t &next_hop_group_id); + void increaseNextHopGroupCount(); + void decreaseNextHopGroupCount(); + bool checkNextHopGroupCount(); + 
private: SwitchOrch *m_switchOrch; NeighOrch *m_neighOrch; diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index 38712642bc..03c0f93dd9 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -61,6 +61,7 @@ sai_dtel_api_t* sai_dtel_api; sai_samplepacket_api_t* sai_samplepacket_api; sai_debug_counter_api_t* sai_debug_counter_api; sai_nat_api_t* sai_nat_api; +sai_bfd_api_t* sai_bfd_api; extern sai_object_id_t gSwitchId; extern bool gSairedisRecord; @@ -179,6 +180,7 @@ void initSaiApi() sai_api_query(SAI_API_SAMPLEPACKET, (void **)&sai_samplepacket_api); sai_api_query(SAI_API_DEBUG_COUNTER, (void **)&sai_debug_counter_api); sai_api_query(SAI_API_NAT, (void **)&sai_nat_api); + sai_api_query(SAI_API_BFD, (void **)&sai_bfd_api); sai_log_set(SAI_API_SWITCH, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_BRIDGE, SAI_LOG_LEVEL_NOTICE); @@ -207,6 +209,7 @@ void initSaiApi() sai_log_set(SAI_API_SAMPLEPACKET, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_DEBUG_COUNTER, SAI_LOG_LEVEL_NOTICE); sai_log_set((sai_api_t)SAI_API_NAT, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_BFD, SAI_LOG_LEVEL_NOTICE); } void initSaiRedis(const string &record_location, const std::string &record_filename) diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index efc60d22c2..7b8a2ada12 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -20,6 +20,7 @@ #include "intfsorch.h" #include "neighorch.h" #include "crmorch.h" +#include "routeorch.h" extern sai_virtual_router_api_t* sai_virtual_router_api; extern sai_route_api_t* sai_route_api; @@ -28,6 +29,7 @@ extern sai_router_interface_api_t* sai_router_intfs_api; extern sai_fdb_api_t* sai_fdb_api; extern sai_neighbor_api_t* sai_neighbor_api; extern sai_next_hop_api_t* sai_next_hop_api; +extern sai_next_hop_group_api_t* sai_next_hop_group_api; extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; extern Directory gDirectory; @@ -35,7 +37,9 @@ extern PortsOrch *gPortsOrch; extern IntfsOrch 
*gIntfsOrch; extern NeighOrch *gNeighOrch; extern CrmOrch *gCrmOrch; +extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; +extern BfdOrch *gBfdOrch; /* * VRF Modeling and VNetVrf class definitions @@ -150,15 +154,18 @@ bool VNetVrfObject::hasRoute(IpPrefix& ipPrefix) return false; } -bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, tunnelEndpoint& endp) +bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, NextHopGroupKey& nexthops) { - if (hasRoute(ipPrefix)) + if (nexthops.is_overlay_nexthop()) { - SWSS_LOG_INFO("VNET route '%s' exists", ipPrefix.to_string().c_str()); + tunnels_[ipPrefix] = nexthops; + } + else + { + SWSS_LOG_ERROR("Input %s is not overlay nexthop group", nexthops.to_string().c_str()); return false; } - tunnels_[ipPrefix] = endp; return true; } @@ -237,8 +244,6 @@ bool VNetVrfObject::removeRoute(IpPrefix& ipPrefix) if (tunnels_.find(ipPrefix) != tunnels_.end()) { - auto endp = tunnels_.at(ipPrefix); - removeTunnelNextHop(endp); tunnels_.erase(ipPrefix); } else @@ -267,32 +272,32 @@ bool VNetVrfObject::getRouteNextHop(IpPrefix& ipPrefix, nextHop& nh) return true; } -sai_object_id_t VNetVrfObject::getTunnelNextHop(tunnelEndpoint& endp) +sai_object_id_t VNetVrfObject::getTunnelNextHop(NextHopKey& nh) { sai_object_id_t nh_id = SAI_NULL_OBJECT_ID; auto tun_name = getTunnelName(); VxlanTunnelOrch* vxlan_orch = gDirectory.get(); - nh_id = vxlan_orch->createNextHopTunnel(tun_name, endp.ip, endp.mac, endp.vni); + nh_id = vxlan_orch->createNextHopTunnel(tun_name, nh.ip_address, nh.mac_address, nh.vni); if (nh_id == SAI_NULL_OBJECT_ID) { - throw std::runtime_error("NH Tunnel create failed for " + vnet_name_ + " ip " + endp.ip.to_string()); + throw std::runtime_error("NH Tunnel create failed for " + vnet_name_ + " ip " + nh.ip_address.to_string()); } return nh_id; } -bool VNetVrfObject::removeTunnelNextHop(tunnelEndpoint& endp) +bool VNetVrfObject::removeTunnelNextHop(NextHopKey& nh) { auto tun_name = getTunnelName(); VxlanTunnelOrch* vxlan_orch = 
gDirectory.get(); - if (!vxlan_orch->removeNextHopTunnel(tun_name, endp.ip, endp.mac, endp.vni)) + if (!vxlan_orch->removeNextHopTunnel(tun_name, nh.ip_address, nh.mac_address, nh.vni)) { SWSS_LOG_ERROR("VNET %s Tunnel NextHop remove failed for '%s'", - vnet_name_.c_str(), endp.ip.to_string().c_str()); + vnet_name_.c_str(), nh.ip_address.to_string().c_str()); return false; } @@ -391,7 +396,7 @@ bool VNetOrch::addOperation(const Request& request) sai_attribute_t attr; vector attrs; set peer_list = {}; - bool peer = false, create = false; + bool peer = false, create = false, advertise_prefix = false; uint32_t vni=0; string tunnel; string scope; @@ -422,6 +427,10 @@ bool VNetOrch::addOperation(const Request& request) { scope = request.getAttrString("scope"); } + else if (name == "advertise_prefix") + { + advertise_prefix = request.getAttrBool("advertise_prefix"); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -448,7 +457,7 @@ bool VNetOrch::addOperation(const Request& request) if (it == std::end(vnet_table_)) { - VNetInfo vnet_info = { tunnel, vni, peer_list, scope }; + VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix }; obj = createObject(vnet_name, vnet_info, attrs); create = true; @@ -554,9 +563,14 @@ static bool del_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx) route_entry.destination = ip_pfx; sai_status_t status = sai_route_api->remove_route_entry(&route_entry); - if (status != SAI_STATUS_SUCCESS) + if (status == SAI_STATUS_ITEM_NOT_FOUND || status == SAI_STATUS_INVALID_PARAMETER) + { + SWSS_LOG_INFO("Unable to remove route since route is already removed"); + return true; + } + else if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("SAI Failed to remove route"); + SWSS_LOG_ERROR("SAI Failed to remove route, rv: %d", status); return false; } @@ -603,18 +617,208 @@ static bool add_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx, sai_object return true; } +static bool update_route(sai_object_id_t vr_id, 
sai_ip_prefix_t& ip_pfx, sai_object_id_t nh_id) +{ + sai_route_entry_t route_entry; + route_entry.vr_id = vr_id; + route_entry.switch_id = gSwitchId; + route_entry.destination = ip_pfx; + + sai_attribute_t route_attr; + + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + route_attr.value.oid = nh_id; + + sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("SAI failed to update route"); + return false; + } + + return true; +} + VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOrch *vnetOrch) - : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch) + : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME) { SWSS_LOG_ENTER(); handler_map_.insert(handler_pair(APP_VNET_RT_TABLE_NAME, &VNetRouteOrch::handleRoutes)); handler_map_.insert(handler_pair(APP_VNET_RT_TUNNEL_TABLE_NAME, &VNetRouteOrch::handleTunnel)); + + state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); + state_vnet_rt_tunnel_table_ = unique_ptr(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); + state_vnet_rt_adv_table_ = unique_ptr
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); + + gBfdOrch->attach(this); +} + +bool VNetRouteOrch::hasNextHopGroup(const string& vnet, const NextHopGroupKey& nexthops) +{ + return syncd_nexthop_groups_[vnet].find(nexthops) != syncd_nexthop_groups_[vnet].end(); +} + +sai_object_id_t VNetRouteOrch::getNextHopGroupId(const string& vnet, const NextHopGroupKey& nexthops) +{ + assert(hasNextHopGroup(vnet, nexthops)); + return syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; +} + +bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj) +{ + SWSS_LOG_ENTER(); + + assert(!hasNextHopGroup(vnet, nexthops)); + + if (!gRouteOrch->checkNextHopGroupCount()) + { + SWSS_LOG_ERROR("Reached maximum number of next hop groups. Failed to create new next hop group."); + return false; + } + + vector next_hop_ids; + set next_hop_set = nexthops.getNextHops(); + std::map nhopgroup_members_set; + + for (auto it : next_hop_set) + { + if (nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) + { + continue; + } + sai_object_id_t next_hop_id = vrf_obj->getTunnelNextHop(it); + next_hop_ids.push_back(next_hop_id); + nhopgroup_members_set[next_hop_id] = it; + } + + sai_attribute_t nhg_attr; + vector nhg_attrs; + + nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; + nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + nhg_attrs.push_back(nhg_attr); + + sai_object_id_t next_hop_group_id; + sai_status_t status = sai_next_hop_group_api->create_next_hop_group(&next_hop_group_id, + gSwitchId, + (uint32_t)nhg_attrs.size(), + nhg_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create next hop group %s, rv:%d", + nexthops.to_string().c_str(), status); + return false; + } + + gRouteOrch->increaseNextHopGroupCount(); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP); + 
SWSS_LOG_NOTICE("Create next hop group %s", nexthops.to_string().c_str()); + + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = next_hop_group_id; + + for (auto nhid: next_hop_ids) + { + // Create a next hop group member + vector nhgm_attrs; + + sai_attribute_t nhgm_attr; + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + nhgm_attr.value.oid = next_hop_group_id; + nhgm_attrs.push_back(nhgm_attr); + + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + nhgm_attr.value.oid = nhid; + nhgm_attrs.push_back(nhgm_attr); + + sai_object_id_t next_hop_group_member_id; + status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, + gSwitchId, + (uint32_t)nhgm_attrs.size(), + nhgm_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create next hop group %" PRIx64 " member %" PRIx64 ": %d\n", + next_hop_group_id, next_hop_group_member_id, status); + return false; + } + + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + + // Save the membership into next hop structure + next_hop_group_entry.active_members[nhopgroup_members_set.find(nhid)->second] = + next_hop_group_member_id; + } + + /* + * Initialize the next hop group structure with ref_count as 0. This + * count will increase once the route is successfully syncd. 
+ */ + next_hop_group_entry.ref_count = 0; + syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; + + return true; +} + +bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t next_hop_group_id; + auto next_hop_group_entry = syncd_nexthop_groups_[vnet].find(nexthops); + sai_status_t status; + + assert(next_hop_group_entry != syncd_nexthop_groups_[vnet].end()); + + if (next_hop_group_entry->second.ref_count != 0) + { + return true; + } + + next_hop_group_id = next_hop_group_entry->second.next_hop_group_id; + SWSS_LOG_NOTICE("Delete next hop group %s", nexthops.to_string().c_str()); + + for (auto nhop = next_hop_group_entry->second.active_members.begin(); + nhop != next_hop_group_entry->second.active_members.end();) + { + NextHopKey nexthop = nhop->first; + + status = sai_next_hop_group_api->remove_next_hop_group_member(nhop->second); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove next hop group member %" PRIx64 ", rv:%d", + nhop->second, status); + return false; + } + + vrf_obj->removeTunnelNextHop(nexthop); + + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + nhop = next_hop_group_entry->second.active_members.erase(nhop); + } + + status = sai_next_hop_group_api->remove_next_hop_group(next_hop_group_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove next hop group %" PRIx64 ", rv:%d", next_hop_group_id, status); + return false; + } + + gRouteOrch->decreaseNextHopGroupCount(); + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP); + + syncd_nexthop_groups_[vnet].erase(nexthops); + + return true; } template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, - tunnelEndpoint& endp, string& op) + NextHopGroupKey& nexthops, string& op, + const map& monitors) { SWSS_LOG_ENTER(); @@ -648,29 +852,248 @@ bool VNetRouteOrch::doRouteTask(const 
string& vnet, IpPrefix& ipP auto *vrf_obj = vnet_orch_->getTypePtr(vnet); sai_ip_prefix_t pfx; copy(pfx, ipPrefix); - sai_object_id_t nh_id = (op == SET_COMMAND)?vrf_obj->getTunnelNextHop(endp):SAI_NULL_OBJECT_ID; - for (auto vr_id : vr_set) + if (op == SET_COMMAND) { - if (op == SET_COMMAND && !add_route(vr_id, pfx, nh_id)) + sai_object_id_t nh_id; + if (!hasNextHopGroup(vnet, nexthops)) { - SWSS_LOG_ERROR("Route add failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); - return false; + setEndpointMonitor(vnet, monitors, nexthops); + if (nexthops.getSize() == 1) + { + NextHopKey nexthop(nexthops.to_string(), true); + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); + next_hop_group_entry.ref_count = 0; + if (nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) + { + next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + } + syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; + } + else + { + if (!addNextHopGroup(vnet, nexthops, vrf_obj)) + { + delEndpointMonitor(vnet, nexthops); + SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); + return false; + } + } } - else if (op == DEL_COMMAND && !del_route(vr_id, pfx)) + nh_id = syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; + + auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); + for (auto vr_id : vr_set) + { + bool route_status = true; + + // Remove route if the nexthop group has no active endpoint + if (syncd_nexthop_groups_[vnet][nexthops].active_members.empty()) + { + if (it_route != syncd_tunnel_routes_[vnet].end()) + { + NextHopGroupKey nhg = it_route->second; + // Remove route when updating from a nhg with active member to another nhg without + if (!syncd_nexthop_groups_[vnet][nhg].active_members.empty()) + { + del_route(vr_id, pfx); + } + } + } + else 
+ { + if (it_route == syncd_tunnel_routes_[vnet].end()) + { + route_status = add_route(vr_id, pfx, nh_id); + } + else + { + NextHopGroupKey nhg = it_route->second; + if (syncd_nexthop_groups_[vnet][nhg].active_members.empty()) + { + route_status = add_route(vr_id, pfx, nh_id); + } + else + { + route_status = update_route(vr_id, pfx, nh_id); + } + } + } + + if (!route_status) + { + SWSS_LOG_ERROR("Route add/update failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + /* Clean up the newly created next hop group entry */ + if (nexthops.getSize() > 1) + { + removeNextHopGroup(vnet, nexthops, vrf_obj); + } + return false; + } + } + + if (it_route != syncd_tunnel_routes_[vnet].end()) { - SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + // In case of updating an existing route, decrease the reference count for the previous nexthop group + NextHopGroupKey nhg = it_route->second; + if(--syncd_nexthop_groups_[vnet][nhg].ref_count == 0) + { + if (nexthops.getSize() > 1) + { + removeNextHopGroup(vnet, nhg, vrf_obj); + } + else + { + syncd_nexthop_groups_[vnet].erase(nhg); + NextHopKey nexthop(nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + delEndpointMonitor(vnet, nhg); + } + else + { + syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); + } + vrf_obj->removeRoute(ipPrefix); + } + + syncd_nexthop_groups_[vnet][nexthops].tunnel_routes.insert(ipPrefix); + + syncd_tunnel_routes_[vnet][ipPrefix] = nexthops; + syncd_nexthop_groups_[vnet][nexthops].ref_count++; + vrf_obj->addRoute(ipPrefix, nexthops); + + postRouteState(vnet, ipPrefix, nexthops); + } + else if (op == DEL_COMMAND) + { + auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); + if (it_route == syncd_tunnel_routes_[vnet].end()) + { + SWSS_LOG_INFO("Failed to find tunnel route entry, prefix %s\n", + ipPrefix.to_string().c_str()); + return true; + } + NextHopGroupKey nhg = it_route->second; + + for (auto vr_id 
: vr_set) + { + // If an nhg has no active member, the route should already be removed + if (!syncd_nexthop_groups_[vnet][nhg].active_members.empty()) + { + if (!del_route(vr_id, pfx)) + { + SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + return false; + } + } + } + + if(--syncd_nexthop_groups_[vnet][nhg].ref_count == 0) + { + if (nhg.getSize() > 1) + { + removeNextHopGroup(vnet, nhg, vrf_obj); + } + else + { + syncd_nexthop_groups_[vnet].erase(nhg); + NextHopKey nexthop(nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + delEndpointMonitor(vnet, nhg); + } + else + { + syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); + } + + syncd_tunnel_routes_[vnet].erase(ipPrefix); + if (syncd_tunnel_routes_[vnet].empty()) + { + syncd_tunnel_routes_.erase(vnet); + } + + vrf_obj->removeRoute(ipPrefix); + + removeRouteState(vnet, ipPrefix); + } + + return true; +} + +bool VNetRouteOrch::updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, + NextHopGroupKey& nexthops, string& op) +{ + SWSS_LOG_ENTER(); + + if (!vnet_orch_->isVnetExists(vnet)) + { + SWSS_LOG_WARN("VNET %s doesn't exist for prefix %s, op %s", + vnet.c_str(), ipPrefix.to_string().c_str(), op.c_str()); + return (op == DEL_COMMAND)?true:false; + } + + set vr_set; + auto& peer_list = vnet_orch_->getPeerList(vnet); + + auto l_fn = [&] (const string& vnet) { + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + sai_object_id_t vr_id = vnet_obj->getVRidIngress(); + vr_set.insert(vr_id); + }; + + l_fn(vnet); + for (auto peer : peer_list) + { + if (!vnet_orch_->isVnetExists(peer)) + { + SWSS_LOG_INFO("Peer VNET %s not yet created", peer.c_str()); return false; } + l_fn(peer); } + sai_ip_prefix_t pfx; + copy(pfx, ipPrefix); + if (op == SET_COMMAND) { - vrf_obj->addRoute(ipPrefix, endp); + sai_object_id_t nh_id = syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; + + for (auto vr_id : vr_set) + { + bool route_status = true; + + 
route_status = add_route(vr_id, pfx, nh_id); + + if (!route_status) + { + SWSS_LOG_ERROR("Route add failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + return false; + } + } } - else + else if (op == DEL_COMMAND) { - vrf_obj->removeRoute(ipPrefix); + auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); + if (it_route == syncd_tunnel_routes_[vnet].end()) + { + SWSS_LOG_INFO("Failed to find tunnel route entry, prefix %s\n", + ipPrefix.to_string().c_str()); + return true; + } + NextHopGroupKey nhg = it_route->second; + + for (auto vr_id : vr_set) + { + if (!del_route(vr_id, pfx)) + { + SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + return false; + } + } } return true; @@ -1037,27 +1460,366 @@ void VNetRouteOrch::delRoute(const IpPrefix& ipPrefix) syncd_routes_.erase(route_itr); } +void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr) +{ + SWSS_LOG_ENTER(); + + IpAddress endpoint_addr = endpoint.ip_address; + if (nexthop_info_[vnet].find(endpoint_addr) != nexthop_info_[vnet].end()) + { + SWSS_LOG_ERROR("BFD session for endpoint %s already exist", endpoint_addr.to_string().c_str()); + return; + } + + if (bfd_sessions_.find(monitor_addr) == bfd_sessions_.end()) + { + vector data; + string key = "default:default:" + monitor_addr.to_string(); + + auto tun_name = vnet_orch_->getTunnelName(vnet); + VxlanTunnelOrch* vxlan_orch = gDirectory.get(); + auto tunnel_obj = vxlan_orch->getVxlanTunnel(tun_name); + IpAddress src_ip = tunnel_obj->getSrcIP(); + + FieldValueTuple fvTuple("local_addr", src_ip.to_string()); + data.push_back(fvTuple); + + bfd_session_producer_.set(key, data); + + bfd_sessions_[monitor_addr].bfd_state = SAI_BFD_SESSION_STATE_DOWN; + } + + BfdSessionInfo& bfd_info = bfd_sessions_[monitor_addr]; + bfd_info.vnet = vnet; + bfd_info.endpoint = endpoint; + VNetNextHopInfo nexthop_info; + nexthop_info.monitor_addr = 
monitor_addr; + nexthop_info.bfd_state = bfd_info.bfd_state; + nexthop_info.ref_count = 0; + nexthop_info_[vnet][endpoint_addr] = nexthop_info; +} + +void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr) +{ + SWSS_LOG_ENTER(); + + IpAddress endpoint_addr = endpoint.ip_address; + if (nexthop_info_[vnet].find(endpoint_addr) == nexthop_info_[vnet].end()) + { + SWSS_LOG_ERROR("BFD session for endpoint %s does not exist", endpoint_addr.to_string().c_str()); + } + nexthop_info_[vnet].erase(endpoint_addr); + + string key = "default:default:" + monitor_addr.to_string(); + + bfd_session_producer_.del(key); + + bfd_sessions_.erase(monitor_addr); +} + +void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops) +{ + SWSS_LOG_ENTER(); + + for (auto monitor : monitors) + { + NextHopKey nh = monitor.first; + IpAddress monitor_ip = monitor.second; + if (nexthop_info_[vnet].find(nh.ip_address) == nexthop_info_[vnet].end()) + { + createBfdSession(vnet, nh, monitor_ip); + } + + nexthop_info_[vnet][nh.ip_address].ref_count++; + } +} + +void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops) +{ + SWSS_LOG_ENTER(); + + std::set nhks = nexthops.getNextHops(); + for (auto nhk: nhks) + { + IpAddress ip = nhk.ip_address; + if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { + if (--nexthop_info_[vnet][ip].ref_count == 0) + { + removeBfdSession(vnet, nhk, nexthop_info_[vnet][ip].monitor_addr); + } + } + } +} + +void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops) +{ + const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); + vector fvVector; + + NextHopGroupInfo& nhg_info = syncd_nexthop_groups_[vnet][nexthops]; + string route_state = nhg_info.active_members.empty() ? 
"inactive" : "active"; + string ep_str = ""; + int idx_ep = 0; + for (auto nh_pair : nhg_info.active_members) + { + NextHopKey nh = nh_pair.first; + ep_str += idx_ep == 0 ? nh.ip_address.to_string() : "," + nh.ip_address.to_string(); + idx_ep++; + } + + fvVector.emplace_back("active_endpoints", ep_str); + fvVector.emplace_back("state", route_state); + + state_vnet_rt_tunnel_table_->set(state_db_key, fvVector); + + if (vnet_orch_->getAdvertisePrefix(vnet)) + { + if (route_state == "active") + { + addRouteAdvertisement(ipPrefix); + } + else + { + removeRouteAdvertisement(ipPrefix); + } + } +} + +void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) +{ + const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); + state_vnet_rt_tunnel_table_->del(state_db_key); + removeRouteAdvertisement(ipPrefix); +} + +void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix) +{ + const string key = ipPrefix.to_string(); + vector fvs; + fvs.push_back(FieldValueTuple("", "")); + state_vnet_rt_adv_table_->set(key, fvs); +} + +void VNetRouteOrch::removeRouteAdvertisement(IpPrefix& ipPrefix) +{ + const string key = ipPrefix.to_string(); + state_vnet_rt_adv_table_->del(key); +} + +void VNetRouteOrch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + assert(cntx); + + switch(type) { + case SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE: + { + BfdUpdate *update = static_cast(cntx); + updateVnetTunnel(*update); + break; + } + default: + // Received update in which we are not interested + // Ignore it + return; + } +} + +void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) +{ + SWSS_LOG_ENTER(); + + auto key = update.peer; + sai_bfd_session_state_t state = update.state; + + size_t found_vrf = key.find(state_db_key_delimiter); + if (found_vrf == string::npos) + { + SWSS_LOG_ERROR("Failed to parse key %s, no vrf is given", key.c_str()); + return; + } + + size_t found_ifname = key.find(state_db_key_delimiter, found_vrf + 1); + if 
(found_ifname == string::npos) + { + SWSS_LOG_ERROR("Failed to parse key %s, no ifname is given", key.c_str()); + return; + } + + string vrf_name = key.substr(0, found_vrf); + string alias = key.substr(found_vrf + 1, found_ifname - found_vrf - 1); + IpAddress peer_address(key.substr(found_ifname + 1)); + + if (alias != "default" || vrf_name != "default") + { + return; + } + + auto it_peer = bfd_sessions_.find(peer_address); + + if (it_peer == bfd_sessions_.end()) { + SWSS_LOG_INFO("No endpoint for BFD peer %s", peer_address.to_string().c_str()); + return; + } + + BfdSessionInfo& bfd_info = it_peer->second; + bfd_info.bfd_state = state; + + string vnet = bfd_info.vnet; + NextHopKey endpoint = bfd_info.endpoint; + auto *vrf_obj = vnet_orch_->getTypePtr(vnet); + + if (syncd_nexthop_groups_.find(vnet) == syncd_nexthop_groups_.end()) + { + SWSS_LOG_ERROR("Vnet %s not found", vnet.c_str()); + return; + } + + nexthop_info_[vnet][endpoint.ip_address].bfd_state = state; + + for (auto& nhg_info_pair : syncd_nexthop_groups_[vnet]) + { + NextHopGroupKey nexthops = nhg_info_pair.first; + NextHopGroupInfo& nhg_info = nhg_info_pair.second; + + if (!(nexthops.contains(endpoint))) + { + continue; + } + + if (state == SAI_BFD_SESSION_STATE_UP) + { + sai_object_id_t next_hop_group_member_id = SAI_NULL_OBJECT_ID; + if (nexthops.getSize() > 1) + { + // Create a next hop group member + vector nhgm_attrs; + + sai_attribute_t nhgm_attr; + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + nhgm_attr.value.oid = nhg_info.next_hop_group_id; + nhgm_attrs.push_back(nhgm_attr); + + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + nhgm_attr.value.oid = vrf_obj->getTunnelNextHop(endpoint); + nhgm_attrs.push_back(nhgm_attr); + + sai_status_t status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, + gSwitchId, + (uint32_t)nhgm_attrs.size(), + nhgm_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to add 
next hop member to group %" PRIx64 ": %d\n", + nhg_info.next_hop_group_id, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + if (handle_status != task_success) + { + continue; + } + } + + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + } + + // Re-create routes when it was temporarily removed + if (nhg_info.active_members.empty()) + { + nhg_info.active_members[endpoint] = next_hop_group_member_id; + if (vnet_orch_->isVnetExecVrf()) + { + for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + string op = SET_COMMAND; + updateTunnelRoute(vnet, ip_pfx, nexthops, op); + } + } + } + else + { + nhg_info.active_members[endpoint] = next_hop_group_member_id; + } + } + else + { + if (nexthops.getSize() > 1 && nhg_info.active_members.find(endpoint) != nhg_info.active_members.end()) + { + sai_object_id_t nexthop_id = nhg_info.active_members[endpoint]; + sai_status_t status = sai_next_hop_group_api->remove_next_hop_group_member(nexthop_id); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove next hop member %" PRIx64 " from group %" PRIx64 ": %d\n", + nexthop_id, nhg_info.next_hop_group_id, status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEXT_HOP_GROUP, status); + if (handle_status != task_success) + { + continue; + } + } + + vrf_obj->removeTunnelNextHop(endpoint); + + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + } + + if (nhg_info.active_members.find(endpoint) != nhg_info.active_members.end()) + { + nhg_info.active_members.erase(endpoint); + + // Remove routes when nexthop group has no active endpoint + if (nhg_info.active_members.empty()) + { + if (vnet_orch_->isVnetExecVrf()) + { + for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + string op = DEL_COMMAND; + updateTunnelRoute(vnet, ip_pfx, nexthops, op); + } + } + } + } + } + + // Post configured in State DB + 
for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + postRouteState(vnet, ip_pfx, nexthops); + } + } +} + bool VNetRouteOrch::handleTunnel(const Request& request) { SWSS_LOG_ENTER(); - IpAddress ip; - MacAddress mac; - uint32_t vni = 0; + vector ip_list; + vector mac_list; + vector vni_list; + vector monitor_list; for (const auto& name: request.getAttrFieldNames()) { if (name == "endpoint") { - ip = request.getAttrIP(name); + ip_list = request.getAttrIPList(name); } else if (name == "vni") { - vni = static_cast(request.getAttrUint(name)); + string vni_str = request.getAttrString(name); + vni_list = tokenize(vni_str, ','); } else if (name == "mac_address") { - mac = request.getAttrMacAddress(name); + string mac_str = request.getAttrString(name); + mac_list = tokenize(mac_str, ','); + } + else if (name == "endpoint_monitor") + { + monitor_list = request.getAttrIPList(name); } else { @@ -1066,6 +1828,24 @@ bool VNetRouteOrch::handleTunnel(const Request& request) } } + if (vni_list.size() > 1 && vni_list.size() != ip_list.size()) + { + SWSS_LOG_ERROR("VNI size of %zu does not match endpoint size of %zu", vni_list.size(), ip_list.size()); + return false; + } + + if (!mac_list.empty() && mac_list.size() != ip_list.size()) + { + SWSS_LOG_ERROR("MAC address size of %zu does not match endpoint size of %zu", mac_list.size(), ip_list.size()); + return false; + } + + if (!monitor_list.empty() && monitor_list.size() != ip_list.size()) + { + SWSS_LOG_ERROR("Peer monitor size of %zu does not match endpoint size of %zu", monitor_list.size(), ip_list.size()); + return false; + } + const std::string& vnet_name = request.getKeyString(0); auto ip_pfx = request.getKeyIpPrefix(1); auto op = request.getOperation(); @@ -1073,11 +1853,38 @@ bool VNetRouteOrch::handleTunnel(const Request& request) SWSS_LOG_INFO("VNET-RT '%s' op '%s' for pfx %s", vnet_name.c_str(), op.c_str(), ip_pfx.to_string().c_str()); - tunnelEndpoint endp = { ip, mac, vni }; + NextHopGroupKey 
nhg("", true); + map monitors; + for (size_t idx_ip = 0; idx_ip < ip_list.size(); idx_ip++) + { + IpAddress ip = ip_list[idx_ip]; + MacAddress mac; + uint32_t vni = 0; + if (vni_list.size() == 1 && vni_list[0] != "") + { + vni = (uint32_t)stoul(vni_list[0]); + } + else if (vni_list.size() > 1 && vni_list[idx_ip] != "") + { + vni = (uint32_t)stoul(vni_list[idx_ip]); + } + + if (!mac_list.empty() && mac_list[idx_ip] != "") + { + mac = MacAddress(mac_list[idx_ip]); + } + + NextHopKey nh(ip, mac, vni, true); + nhg.add(nh); + if (!monitor_list.empty()) + { + monitors[nh] = monitor_list[idx_ip]; + } + } if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, endp, op); + return doRouteTask(vnet_name, ip_pfx, nhg, op, monitors); } return true; diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 2ca48ec3a0..53c4acf1e6 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -12,6 +12,9 @@ #include "ipaddresses.h" #include "producerstatetable.h" #include "observer.h" +#include "intfsorch.h" +#include "nexthopgroupkey.h" +#include "bfdorch.h" #define VNET_BITMAP_SIZE 32 #define VNET_TUNNEL_SIZE 40960 @@ -25,12 +28,13 @@ extern sai_object_id_t gVirtualRouterId; const request_description_t vnet_request_description = { { REQ_T_STRING }, { - { "src_mac", REQ_T_MAC_ADDRESS }, - { "vxlan_tunnel", REQ_T_STRING }, - { "vni", REQ_T_UINT }, - { "peer_list", REQ_T_SET }, - { "guid", REQ_T_STRING }, - { "scope", REQ_T_STRING }, + { "src_mac", REQ_T_MAC_ADDRESS }, + { "vxlan_tunnel", REQ_T_STRING }, + { "vni", REQ_T_UINT }, + { "peer_list", REQ_T_SET }, + { "guid", REQ_T_STRING }, + { "scope", REQ_T_STRING }, + { "advertise_prefix", REQ_T_BOOL}, }, { "vxlan_tunnel", "vni" } // mandatory attributes }; @@ -55,6 +59,7 @@ struct VNetInfo uint32_t vni; set peers; string scope; + bool advertise_prefix; }; typedef map vrid_list_t; @@ -66,11 +71,12 @@ class VNetRequest : public Request VNetRequest() : Request(vnet_request_description, ':') { } }; -struct 
tunnelEndpoint +struct NextHopGroupInfo { - IpAddress ip; - MacAddress mac; - uint32_t vni; + sai_object_id_t next_hop_group_id; // next hop group id (null for single nexthop) + int ref_count; // reference count + std::map active_members; // active nexthops and nexthop group member id (null for single nexthop) + std::set tunnel_routes; }; class VNetObject @@ -80,7 +86,8 @@ class VNetObject tunnel_(vnetInfo.tunnel), peer_list_(vnetInfo.peers), vni_(vnetInfo.vni), - scope_(vnetInfo.scope) + scope_(vnetInfo.scope), + advertise_prefix_(vnetInfo.advertise_prefix) { } virtual bool updateObj(vector&) = 0; @@ -110,6 +117,11 @@ class VNetObject return scope_; } + bool getAdvertisePrefix() const + { + return advertise_prefix_; + } + virtual ~VNetObject() noexcept(false) {}; private: @@ -117,6 +129,7 @@ class VNetObject string tunnel_; uint32_t vni_; string scope_; + bool advertise_prefix_; }; struct nextHop @@ -125,7 +138,7 @@ struct nextHop string ifname; }; -typedef std::map TunnelRoutes; +typedef std::map TunnelRoutes; typedef std::map RouteMap; class VNetVrfObject : public VNetObject @@ -165,7 +178,7 @@ class VNetVrfObject : public VNetObject bool updateObj(vector&); - bool addRoute(IpPrefix& ipPrefix, tunnelEndpoint& endp); + bool addRoute(IpPrefix& ipPrefix, NextHopGroupKey& nexthops); bool addRoute(IpPrefix& ipPrefix, nextHop& nh); bool removeRoute(IpPrefix& ipPrefix); @@ -173,8 +186,8 @@ class VNetVrfObject : public VNetObject bool getRouteNextHop(IpPrefix& ipPrefix, nextHop& nh); bool hasRoute(IpPrefix& ipPrefix); - sai_object_id_t getTunnelNextHop(tunnelEndpoint& endp); - bool removeTunnelNextHop(tunnelEndpoint& endp); + sai_object_id_t getTunnelNextHop(NextHopKey& nh); + bool removeTunnelNextHop(NextHopKey& nh); void increaseNextHopRefCount(const nextHop&); void decreaseNextHopRefCount(const nextHop&); @@ -220,6 +233,11 @@ class VNetOrch : public Orch2 return vnet_table_.at(name)->getTunnelName(); } + bool getAdvertisePrefix(const std::string& name) const + { + 
return vnet_table_.at(name)->getAdvertisePrefix(); + } + bool isVnetExecVrf() const { return (vnet_exec_ == VNET_EXEC::VNET_EXEC_VRF); @@ -246,11 +264,12 @@ class VNetOrch : public Orch2 const request_description_t vnet_route_description = { { REQ_T_STRING, REQ_T_IP_PREFIX }, { - { "endpoint", REQ_T_IP }, - { "ifname", REQ_T_STRING }, - { "nexthop", REQ_T_STRING }, - { "vni", REQ_T_UINT }, - { "mac_address", REQ_T_MAC_ADDRESS }, + { "endpoint", REQ_T_IP_LIST }, + { "ifname", REQ_T_STRING }, + { "nexthop", REQ_T_STRING }, + { "vni", REQ_T_STRING }, + { "mac_address", REQ_T_STRING }, + { "endpoint_monitor", REQ_T_IP_LIST }, }, { } }; @@ -281,7 +300,26 @@ struct VNetNextHopObserverEntry /* NextHopObserverTable: Destination IP address, next hop observer entry */ typedef std::map VNetNextHopObserverTable; -class VNetRouteOrch : public Orch2, public Subject +struct VNetNextHopInfo +{ + IpAddress monitor_addr; + sai_bfd_session_state_t bfd_state; + int ref_count; +}; + +struct BfdSessionInfo +{ + sai_bfd_session_state_t bfd_state; + std::string vnet; + NextHopKey endpoint; +}; + +typedef std::map VNetNextHopGroupInfoTable; +typedef std::map VNetTunnelRouteTable; +typedef std::map BfdSessionTable; +typedef std::map VNetEndpointInfoTable; + +class VNetRouteOrch : public Orch2, public Subject, public Observer { public: VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOrch *); @@ -292,6 +330,8 @@ class VNetRouteOrch : public Orch2, public Subject void attach(Observer* observer, const IpAddress& dstAddr); void detach(Observer* observer, const IpAddress& dstAddr); + void update(SubjectType, void *); + private: virtual bool addOperation(const Request& request); virtual bool delOperation(const Request& request); @@ -302,8 +342,26 @@ class VNetRouteOrch : public Orch2, public Subject bool handleRoutes(const Request&); bool handleTunnel(const Request&); + bool hasNextHopGroup(const string&, const NextHopGroupKey&); + sai_object_id_t getNextHopGroupId(const string&, const 
NextHopGroupKey&); + bool addNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); + bool removeNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); + + void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); + void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); + void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops); + void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); + void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops); + void removeRouteState(const string& vnet, IpPrefix& ipPrefix); + void addRouteAdvertisement(IpPrefix& ipPrefix); + void removeRouteAdvertisement(IpPrefix& ipPrefix); + + void updateVnetTunnel(const BfdUpdate&); + bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); + template - bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, tunnelEndpoint& endp, string& op); + bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, + const std::map& monitors=std::map()); template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, nextHop& nh, string& op); @@ -314,6 +372,14 @@ class VNetRouteOrch : public Orch2, public Subject VNetRouteTable syncd_routes_; VNetNextHopObserverTable next_hop_observers_; + std::map syncd_nexthop_groups_; + std::map syncd_tunnel_routes_; + BfdSessionTable bfd_sessions_; + std::map nexthop_info_; + ProducerStateTable bfd_session_producer_; + shared_ptr state_db_; + unique_ptr
state_vnet_rt_tunnel_table_; + unique_ptr
state_vnet_rt_adv_table_; }; class VNetCfgRouteOrch : public Orch diff --git a/orchagent/vxlanorch.h b/orchagent/vxlanorch.h index 9df5e34bb6..ad000ec8db 100644 --- a/orchagent/vxlanorch.h +++ b/orchagent/vxlanorch.h @@ -171,6 +171,10 @@ class VxlanTunnel return ids_.tunnel_term_id; } + const IpAddress getSrcIP() + { + return src_ip_; + } void updateNextHop(IpAddress& ipAddr, MacAddress macAddress, uint32_t vni, sai_object_id_t nhId); bool removeNextHop(IpAddress& ipAddr, MacAddress macAddress, uint32_t vni); diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 143a3faab0..68b79ae182 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -66,7 +66,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/sfloworch.cpp \ $(top_srcdir)/orchagent/debugcounterorch.cpp \ $(top_srcdir)/orchagent/natorch.cpp \ - $(top_srcdir)/orchagent/muxorch.cpp + $(top_srcdir)/orchagent/muxorch.cpp \ + $(top_srcdir)/orchagent/bfdorch.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp diff --git a/tests/test_bfd.py b/tests/test_bfd.py new file mode 100644 index 0000000000..0e8b167360 --- /dev/null +++ b/tests/test_bfd.py @@ -0,0 +1,466 @@ +import pytest +import time + +from swsscommon import swsscommon + +class TestBfd(object): + def setup_db(self, dvs): + dvs.setup_db() + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.sdb = dvs.get_state_db() + + def get_exist_bfd_session(self): + return set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION")) + + def create_bfd_session(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "BFD_SESSION_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) + + def remove_bfd_session(self, key): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "BFD_SESSION_TABLE") 
+ tbl._del(key) + + def check_asic_bfd_session_value(self, key, expected_values): + fvs = self.adb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def check_state_bfd_session_value(self, key, expected_values): + fvs = self.sdb.get_entry("BFD_SESSION_TABLE", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def update_bfd_session_state(self, dvs, session, state): + bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", + "Down": "SAI_BFD_SESSION_STATE_DOWN", + "Init": "SAI_BFD_SESSION_STATE_INIT", + "Up": "SAI_BFD_SESSION_STATE_UP"} + + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"bfd_session_id\":\""+session+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" + ntf.send("bfd_session_state_change", ntf_data, fvp) + + def test_addRemoveBfdSession(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + 
self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + def test_addRemoveBfdSession_ipv6(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "2000::1"} + self.create_bfd_session("default:default:2000::2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "2000::1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "2000::2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "2000::1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value("default|default|2000::2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Init") + time.sleep(2) + + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Init" + 
self.check_state_bfd_session_value("default|default|2000::2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:2000::2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + def test_addRemoveBfdSession_interface(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "dst_mac": "00:02:03:04:05:06"} + self.create_bfd_session("default:Ethernet0:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_BFD_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_BFD_SESSION_ATTR_DST_MAC_ADDRESS": "00:02:03:04:05:06" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value("default|Ethernet0|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Down") + time.sleep(2) + + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Down" + self.check_state_bfd_session_value("default|Ethernet0|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:Ethernet0:10.0.0.2") + 
self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + def test_addRemoveBfdSession_txrx_interval(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "tx_interval": "300", "rx_interval": "500"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_BFD_SESSION_ATTR_MIN_TX": "300000", + "SAI_BFD_SESSION_ATTR_MIN_RX": "500000", + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"300", + "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Admin_Down") + time.sleep(2) + + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Admin_Down" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + def test_addRemoveBfdSession_multiplier(self, dvs): + self.setup_db(dvs) + + bfdSessions = 
self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "multiplier": "5"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_BFD_SESSION_ATTR_MULTIPLIER": "5" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "5", "multihop": "false"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + def test_addRemoveBfdSession_multihop(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "multihop": "true"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + 
+ # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_BFD_SESSION_ATTR_MULTIHOP": "true" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "true"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + def test_addRemoveBfdSession_type(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "type": "demand_active"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + 
"SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + def test_multipleBfdSessions(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session 1 + key1 = "default:default:10.0.0.2" + fieldValues = {"local_addr": "10.0.0.1"} + self.create_bfd_session(key1, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked BFD session 1 in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + bfdSessions = self.get_exist_bfd_session() + assert len(createdSessions) == 1 + + session1 = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session1, expected_adb_values) + + # Check STATE_DB entry related to the BFD session 1 
+ key_state_db1 = "default|default|10.0.0.2" + expected_sdb_values1 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value(key_state_db1, expected_sdb_values1) + + # Create BFD session 2 + key2 = "default:default:10.0.1.2" + fieldValues = {"local_addr": "10.0.0.1", "tx_interval": "300", "rx_interval": "500"} + self.create_bfd_session(key2, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked BFD session 2 in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + bfdSessions = self.get_exist_bfd_session() + assert len(createdSessions) == 1 + + session2 = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.1.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_BFD_SESSION_ATTR_MIN_TX": "300000", + "SAI_BFD_SESSION_ATTR_MIN_RX": "500000", + } + self.check_asic_bfd_session_value(session2, expected_adb_values) + + # Check STATE_DB entry related to the BFD session 2 + key_state_db2 = "default|default|10.0.1.2" + expected_sdb_values2 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"300", + "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value(key_state_db2, expected_sdb_values2) + + # Create BFD session 3 + key3 = "default:default:2000::2" + fieldValues = {"local_addr": "2000::1", "type": "demand_active"} + self.create_bfd_session(key3, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked BFD session 3 in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + bfdSessions = self.get_exist_bfd_session() + assert 
len(createdSessions) == 1 + + session3 = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "2000::1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "2000::2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" + } + self.check_asic_bfd_session_value(session3, expected_adb_values) + + # Check STATE_DB entry related to the BFD session 3 + key_state_db3 = "default|default|2000::2" + expected_sdb_values3 = {"state": "Down", "type": "demand_active", "local_addr" : "2000::1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value(key_state_db3, expected_sdb_values3) + + # Create BFD session 4 + key4 = "default:default:3000::2" + fieldValues = {"local_addr": "3000::1"} + self.create_bfd_session(key4, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked BFD session 4 in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + bfdSessions = self.get_exist_bfd_session() + assert len(createdSessions) == 1 + + session4 = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "3000::1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "3000::2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" + } + self.check_asic_bfd_session_value(session4, expected_adb_values) + + # Check STATE_DB entry related to the BFD session 4 + key_state_db4 = "default|default|3000::2" + expected_sdb_values4 = {"state": "Down", "type": "async_active", "local_addr" : "3000::1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + self.check_state_bfd_session_value(key_state_db4, expected_sdb_values4) + + # Update BFD session states + self.update_bfd_session_state(dvs, session1, "Up") + expected_sdb_values1["state"] = "Up" + 
self.update_bfd_session_state(dvs, session3, "Init") + expected_sdb_values3["state"] = "Init" + self.update_bfd_session_state(dvs, session4, "Admin_Down") + expected_sdb_values4["state"] = "Admin_Down" + time.sleep(2) + + # Confirm BFD session states in STATE_DB are updated as expected + self.check_state_bfd_session_value(key_state_db1, expected_sdb_values1) + self.check_state_bfd_session_value(key_state_db2, expected_sdb_values2) + self.check_state_bfd_session_value(key_state_db3, expected_sdb_values3) + self.check_state_bfd_session_value(key_state_db4, expected_sdb_values4) + + # Remove the BFD sessions + self.remove_bfd_session(key1) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session1) + self.remove_bfd_session(key2) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session2) + self.remove_bfd_session(key3) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session3) + self.remove_bfd_session(key4) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session4) diff --git a/tests/test_vnet.py b/tests/test_vnet.py index c7fd3c1225..997d97018c 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -6,6 +6,7 @@ from swsscommon import swsscommon from pprint import pprint +from dvslib.dvs_common import wait_for_result def create_entry(tbl, key, pairs): @@ -139,7 +140,11 @@ def delete_vnet_local_routes(dvs, prefix, vnet_name): time.sleep(2) -def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0): +def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor=""): + set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor) + + +def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor=""): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) attrs = [ @@ -152,11 +157,12 @@ def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0): if 
mac: attrs.append(('mac_address', mac)) - create_entry_tbl( - conf_db, - "VNET_ROUTE_TUNNEL", '|', "%s|%s" % (vnet_name, prefix), - attrs, - ) + if ep_monitor: + attrs.append(('endpoint_monitor', ep_monitor)) + + tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") + fvs = swsscommon.FieldValuePairs(attrs) + tbl.set("%s|%s" % (vnet_name, prefix), fvs) time.sleep(2) @@ -308,7 +314,7 @@ def delete_phy_interface(dvs, ifname, ipaddr): time.sleep(2) -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope=""): +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -321,6 +327,9 @@ def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope=""): if scope: attrs.append(('scope', scope)) + if advertise_prefix: + attrs.append(('advertise_prefix', 'true')) + # create the VXLAN tunnel Term entry in Config DB create_entry_tbl( conf_db, @@ -414,6 +423,86 @@ def check_linux_intf_arp_proxy(dvs, ifname): assert out != "1", "ARP proxy is not enabled for VNET interface in Linux kernel" +def update_bfd_session_state(dvs, addr, state): + bfd_id = get_bfd_session_id(dvs, addr) + assert bfd_id is not None + + bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", + "Down": "SAI_BFD_SESSION_STATE_DOWN", + "Init": "SAI_BFD_SESSION_STATE_INIT", + "Up": "SAI_BFD_SESSION_STATE_UP"} + + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + ntf = swsscommon.NotificationProducer(asic_db, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" + ntf.send("bfd_session_state_change", ntf_data, fvp) + + +def get_bfd_session_id(dvs, addr): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(asic_db, 
"ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION") + entries = set(tbl.getKeys()) + for entry in entries: + status, fvs = tbl.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr: + return entry + + return None + + +def check_del_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is None + + +def check_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is not None + + +def check_state_db_routes(dvs, vnet, prefix, endpoints): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + + status, fvs = tbl.get(vnet + '|' + prefix) + assert status, "Got an error when get a key" + + fvs = dict(fvs) + assert fvs['active_endpoints'] == ','.join(endpoints) + + if endpoints: + assert fvs['state'] == 'active' + else: + assert fvs['state'] == 'inactive' + + +def check_remove_state_db_routes(dvs, vnet, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + keys = tbl.getKeys() + + assert vnet + '|' + prefix not in keys + + +def check_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix in keys + + +def check_remove_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix not in keys + + loopback_id = 0 def_vr_id = 0 switch_mac = None @@ -429,7 +518,10 @@ class VnetVxlanVrfTunnel(object): ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" ASIC_ROUTE_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" ASIC_NEXT_HOP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" 
- ASIC_VLAN_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VLAN" + ASIC_VLAN_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VLAN" + ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" + ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" + ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" tunnel_map_ids = set() tunnel_map_entry_ids = set() @@ -440,6 +532,7 @@ class VnetVxlanVrfTunnel(object): vnet_vr_ids = set() vr_map = {} nh_ids = {} + nhg_ids = {} def fetch_exist_entries(self, dvs): self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) @@ -450,6 +543,8 @@ def fetch_exist_entries(self, dvs): self.rifs = get_exist_entries(dvs, self.ASIC_RIF_TABLE) self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP) + self.nhgs = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP) + self.bfd_sessions = get_exist_entries(dvs, self.ASIC_BFD_SESSION) global loopback_id, def_vr_id, switch_mac if not loopback_id: @@ -670,7 +765,7 @@ def check_del_vnet_local_routes(self, dvs, name): # TODO: Implement for VRF VNET return True - def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0): + def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0, route_ids=""): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) vr_ids = self.vnet_route_ids(dvs, name) @@ -697,7 +792,10 @@ def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0): self.nhops.add(new_nh) check_object(asic_db, self.ASIC_NEXT_HOP, new_nh, expected_attr) - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + if not route_ids: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + else: + new_route = route_ids #Check if the route is in expected VRF asic_vrs = set() @@ -714,8 +812,107 @@ def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0): self.routes.update(new_route) - def 
check_del_vnet_routes(self, dvs, name): + return new_route + + def serialize_endpoint_group(self, endpoints): + endpoints.sort() + return ",".join(endpoints) + + def check_next_hop_group_member(self, dvs, nhg, expected_endpoint, expected_attrs): + expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + entries = set(tbl_nhgm.getKeys()) + endpoints = [] + for entry in entries: + status, fvs = tbl_nhgm.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = tbl_nh.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Got an error when get a key" + endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] + endpoints.append(endpoint) + assert endpoint in expected_attrs + check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) + + assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str + + def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg=""): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) + + vr_ids = self.vnet_route_ids(dvs, name) + count = len(vr_ids) + + expected_attrs = {} + for idx, endpoint in enumerate(endpoints): + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + if vni and vni[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) + if mac and mac[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + expected_attrs[endpoint] = expected_attr + + if 
nhg: + new_nhg = nhg + elif endpoint_str in self.nhg_ids: + new_nhg = self.nhg_ids[endpoint_str] + else: + new_nhg = get_created_entry(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) + self.nhg_ids[endpoint_str] = new_nhg + self.nhgs.add(new_nhg) + + + # Check routes in ingress VRF + expected_nhg_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + } + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) + + # Check nexthop group member + self.check_next_hop_group_member(dvs, new_nhg, endpoints, expected_attrs) + + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Check if the route is in expected VRF + asic_vrs = set() + for idx in range(count): + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nhg, + } + ) + rt_key = json.loads(new_route[idx]) + asic_vrs.add(rt_key['vr']) + + assert asic_vrs == vr_ids + + self.routes.update(new_route) + + return new_route, new_nhg + + def check_del_vnet_routes(self, dvs, name, prefixes=[]): # TODO: Implement for VRF VNET + + def _access_function(): + route_entries = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) + route_prefixes = [json.loads(route_entry)["dest"] for route_entry in route_entries] + return (all(prefix not in route_prefixes for prefix in prefixes), None) + + if prefixes: + wait_for_result(_access_function) + return True @@ -751,6 +948,9 @@ def test_vnet_orch_1(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1') vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32", ['10.10.10.1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100') 
vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000') @@ -771,6 +971,9 @@ def test_vnet_orch_1(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A") + check_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32", ['10.10.10.2']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001', 'Ethernet4') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001') @@ -788,9 +991,13 @@ def test_vnet_orch_1(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2001') + check_remove_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2001') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') + check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") delete_phy_interface(dvs, "Ethernet4", "100.102.1.1/24") vnet_obj.check_del_router_interface(dvs, "Ethernet4") @@ -831,18 +1038,29 @@ def test_vnet_orch_2(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32", ['100.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32", ['100.1.1.10']) + # The default 
Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.11/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32", ['200.200.1.200']) + check_remove_routes_advertisement(dvs, "1.1.1.12/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32", ['200.200.1.201']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.14/32") create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1') @@ -858,10 +1076,16 @@ def test_vnet_orch_2(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.11/32") create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2') @@ -876,21 +1100,33 @@ def test_vnet_orch_2(self, dvs, testlog): delete_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') + check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32") + 
check_remove_routes_advertisement(dvs, "2.2.2.11/32") delete_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') + check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32") + check_remove_routes_advertisement(dvs, "2.2.2.10/32") delete_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32") + check_remove_routes_advertisement(dvs, "1.1.1.14/32") delete_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32") + check_remove_routes_advertisement(dvs, "1.1.1.12/32") delete_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32") + check_remove_routes_advertisement(dvs, "1.1.1.11/32") delete_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32") + check_remove_routes_advertisement(dvs, "1.1.1.10/32") delete_vlan_interface(dvs, "Vlan1002", "2.2.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan1002") @@ -937,10 +1173,16 @@ def test_vnet_orch_3(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32", ['50.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name) + check_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32", ['80.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") 
create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10') @@ -958,9 +1200,13 @@ def test_vnet_orch_3(self, dvs, testlog): delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_10') + check_remove_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_20') + check_remove_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") delete_vlan_interface(dvs, "Vlan2001", "5.5.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan2001") @@ -1000,9 +1246,15 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) + check_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") create_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) + check_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.2/32") create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001', 'Vlan300') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001') @@ -1022,6 +1274,9 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet3002', 'fd:2::34', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet3002', 'fd:2::34', tunnel_name, "00:12:34:56:78:9A") + 
check_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/32", ['fd:2::34']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002', 'Ethernet60') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3002') @@ -1039,17 +1294,27 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003', 'fd:2::35') vnet_obj.check_vnet_routes(dvs, 'Vnet3004', 'fd:2::35', tunnel_name) + check_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32", ['fd:2::35']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004', 'fd:2::36') vnet_obj.check_vnet_routes(dvs, 'Vnet3003', 'fd:2::36', tunnel_name) + check_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32", ['fd:2::36']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") # Clean-up and verify remove flows delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3003') + check_remove_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3004') + check_remove_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") delete_vnet_entry(dvs, 'Vnet3003') vnet_obj.check_del_vnet_entry(dvs, 'Vnet3003') @@ -1059,6 +1324,8 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/24", 'Vnet3002') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3002') + check_remove_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/24") + check_remove_routes_advertisement(dvs, "100.100.2.1/24") delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002') vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3002') @@ -1077,9 +1344,13 @@ def 
test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') + check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") delete_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') + check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32") + check_remove_routes_advertisement(dvs, "100.100.1.2/32") delete_vlan_interface(dvs, "Vlan300", "100.100.3.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan300") @@ -1125,6 +1396,675 @@ def test_vnet_vxlan_multi_map(self, dvs, testlog): create_vxlan_tunnel_map(dvs, tunnel_name, 'map_1', 'Vlan1000', '1000') + ''' + Test 7 - Test for vnet tunnel routes with ECMP nexthop group + ''' + def test_vnet_orch_7(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_7' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') + create_vnet_entry(dvs, 'Vnet7', tunnel_name, '10007', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet7') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet7', '10007') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') + + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Set the tunnel route to another nexthop group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, 
route_ids=route1) + check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Create another tunnel route to the same set of endpoints + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name) + check_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + assert nhg2_1 == nhg1_2 + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 in vnet_obj.nhgs + + # Remove the other tunnel route + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Check the nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + delete_vnet_entry(dvs, 'Vnet7') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet7') + + ''' + Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group + ''' + def test_vnet_orch_8(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_8' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, 
tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet8', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet8') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet8', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Set the tunnel route to another nexthop group + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Create another tunnel route to the same set of endpoints + create_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) + check_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") + + assert nhg2_1 == nhg1_2 + + # Create another tunnel route with ipv4 prefix to 
the same set of endpoints + create_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) + check_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.0.0.0/24") + + assert nhg3_1 == nhg1_2 + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 in vnet_obj.nhgs + + # Remove tunnel route 2 + delete_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:20::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128") + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") + + # Remove tunnel route 3 + delete_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["8.0.0.0/24"]) + check_remove_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24") + check_remove_routes_advertisement(dvs, "8.0.0.0/24") + + # Check the nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + delete_vnet_entry(dvs, 'Vnet8') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet8') + + + ''' + Test 9 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor + ''' + def test_vnet_orch_9(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_9' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet9', tunnel_name, '10009', "") + + 
vnet_obj.check_vnet_entry(dvs, 'Vnet9') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet9', '10009') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1']) + # The 
default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '9.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') + update_bfd_session_state(dvs, '9.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + # 
The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Set all endpoint to down state + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + update_bfd_session_state(dvs, '9.1.0.4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.5']) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.5']) + check_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', 
'9.1.0.3', '9.1.0.4', '9.1.0.5']) + + delete_vnet_entry(dvs, 'Vnet9') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet9') + + + ''' + Test 10 - Test for ipv6 vnet tunnel routes with ECMP nexthop group with endpoint health monitor + ''' + def test_vnet_orch_10(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_10' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + create_vnet_entry(dvs, 'Vnet10', tunnel_name, '10010', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet10') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet10', '10010') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 
'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, 'fd:10:2::5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Set the route to a new group + set_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') + update_bfd_session_state(dvs, 'fd:10:2::4', 'Up') + time.sleep(2) + route1, nhg1_2 = 
vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Set all endpoint to down state + update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::5']) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + + # Remove tunnel route2 + delete_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:20::1/128"]) + check_remove_state_db_routes(dvs, 
'Vnet10', "fd:10:20::1/128") + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['fd:10:2::5']) + check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128") + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4', 'fd:10:2::5']) + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + delete_vnet_entry(dvs, 'Vnet10') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet10') + + + ''' + Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor + ''' + def test_vnet_orch_11(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_11' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') + create_vnet_entry(dvs, 'Vnet11', tunnel_name, '100011', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet11') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet11', '100011') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.1', ep_monitor='11.1.0.1') + + # default bfd 
status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when bfd session state goes up + update_bfd_session_state(dvs, '11.1.0.1', 'Up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.1', tunnel_name) + check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", ['11.0.0.1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11', '11.0.0.1,11.0.0.2', ep_monitor='11.1.0.1,11.1.0.2') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Create a third tunnel route with another endpoint + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '11.1.0.2', 'Up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.3.1/32") + + 
update_bfd_session_state(dvs, '11.1.0.1', 'Down') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Set the route1 to a new endpoint + vnet_obj.fetch_exist_entries(dvs) + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') + vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) + check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.3.1/32") + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['11.1.0.1']) + check_bfd_session(dvs, ['11.1.0.2']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Remove tunnel route 3 + delete_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.3.1/32"]) + 
check_remove_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32") + check_remove_routes_advertisement(dvs, "100.100.3.1/32") + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['11.1.0.1', '11.1.0.2']) + + delete_vnet_entry(dvs, 'Vnet11') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet11') + + + ''' + Test 12 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor and route advertisement + ''' + def test_vnet_orch_12(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_12' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + create_vnet_entry(dvs, 'Vnet12', tunnel_name, '10012', "", advertise_prefix=True) + + vnet_obj.check_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet12', '10012') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '12.1.0.1', 'Up') + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + update_bfd_session_state(dvs, '12.1.0.3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = 
vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.5', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '12.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1', '12.0.0.5']) + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4') + update_bfd_session_state(dvs, '12.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group 
is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set all endpoint to down state + update_bfd_session_state(dvs, '12.1.0.1', 'Down') + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + update_bfd_session_state(dvs, '12.1.0.4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.5']) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['12.1.0.5']) + check_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) 
+ check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4', '12.1.0.5']) + + delete_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet12') + # Add dummy always-pass test at the end as a workaround # for an issue where a flaky failure on the final test invokes module tear-down before retrying