epee: regularly cleanup connections we kept a reference to
Connections made through the ::connect method are now kept in a deque so they can be cancelled on exit, which leaks both memory and a file descriptor per connection. To avoid this, we clean those references up after 30 seconds. 30 seconds is longer than the 5 second timeout used in the async code, so this should be safe. However, this is an assumption that would break if the async code ever started relying on longer timeouts.
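The pattern this commit implements can be shown in isolation. The following is a minimal standalone sketch of the idea, not the epee code itself: the class name TimedConnections and the use of std::chrono in place of boost::system_time are illustrative assumptions.

// Minimal sketch of the technique, assuming std::chrono in place of
// boost::system_time: tag each tracked connection with its creation
// time, and periodically drop entries older than a cutoff.
#include <chrono>
#include <deque>
#include <memory>
#include <mutex>
#include <utility>

struct Connection {};  // stands in for epee's connection<t_protocol_handler>
using connection_ptr = std::shared_ptr<Connection>;
using Clock = std::chrono::steady_clock;

class TimedConnections
{
  std::mutex mutex_;
  std::deque<std::pair<Clock::time_point, connection_ptr>> connections_;
  static constexpr std::chrono::seconds cleanup_after{30};

public:
  // Called when a connection is made: remember it so it can be cancelled
  // on exit, together with the time it was created.
  void track(connection_ptr c)
  {
    std::lock_guard<std::mutex> lock(mutex_);
    connections_.push_back({Clock::now(), std::move(c)});
  }

  // Called periodically from an idle handler: anything older than the
  // cutoff has long finished or timed out, so dropping our reference
  // releases its memory and file descriptor.
  void cleanup()
  {
    std::lock_guard<std::mutex> lock(mutex_);
    const Clock::time_point cutoff = Clock::now() - cleanup_after;
    while (!connections_.empty() && connections_.front().first < cutoff)
      connections_.pop_front();  // appended in time order, so oldest is at the front
  }
};

Because entries are appended in creation order, the deque stays sorted by timestamp and cleanup only ever inspects the front, which is what keeps the 5-second idle handler cheap.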
This commit is contained in:
parent a06b0b2b95
commit 22581a0441
@@ -279,6 +279,8 @@ namespace net_utils
 
     bool is_thread_worker();
 
+    bool cleanup_connections();
+
     /// The io_service used to perform asynchronous operations.
     std::unique_ptr<boost::asio::io_service> m_io_service_local_instance;
     boost::asio::io_service& io_service_;
@@ -306,7 +308,7 @@ namespace net_utils
     connection_ptr new_connection_;
 
     std::mutex connections_mutex;
-    std::deque<connection_ptr> connections_;
+    std::deque<std::pair<boost::system_time, connection_ptr>> connections_;
 
   }; // class <>boosted_tcp_server
 
@@ -55,6 +55,8 @@
 #include "../../../../src/p2p/data_logger.hpp"
 using namespace nOT::nUtils; // TODO
 
+#define CONNECTION_CLEANUP_TIME 30 // seconds
+
 PRAGMA_WARNING_PUSH
 namespace epee
 {
@@ -786,6 +788,7 @@ POP_WARNINGS
     m_threads_count = threads_count;
     m_main_thread_id = boost::this_thread::get_id();
     log_space::log_singletone::set_thread_log_prefix("[SRV_MAIN]");
+    add_idle_handler(boost::bind(&boosted_tcp_server::cleanup_connections, this), 5000);
     while(!m_stop_signal_sent)
     {
 
@@ -879,14 +882,28 @@ POP_WARNINGS
     connections_mutex.lock();
     for (auto &c: connections_)
     {
-      c->cancel();
+      c.second->cancel();
     }
+    connections_.clear();
     connections_mutex.unlock();
     io_service_.stop();
     CATCH_ENTRY_L0("boosted_tcp_server<t_protocol_handler>::send_stop_signal()", void());
   }
   //---------------------------------------------------------------------------------
   template<class t_protocol_handler>
+  bool boosted_tcp_server<t_protocol_handler>::cleanup_connections()
+  {
+    connections_mutex.lock();
+    boost::system_time cutoff = boost::get_system_time() - boost::posix_time::seconds(CONNECTION_CLEANUP_TIME);
+    while (!connections_.empty() && connections_.front().first < cutoff)
+    {
+      connections_.pop_front();
+    }
+    connections_mutex.unlock();
+    return true;
+  }
+  //---------------------------------------------------------------------------------
+  template<class t_protocol_handler>
   bool boosted_tcp_server<t_protocol_handler>::is_stop_signal_sent()
   {
     return m_stop_signal_sent;
@@ -926,7 +943,7 @@ POP_WARNINGS
 
     connection_ptr new_connection_l(new connection<t_protocol_handler>(io_service_, m_config, m_sock_count, m_sock_number, m_pfilter, m_connection_type) );
     connections_mutex.lock();
-    connections_.push_back(new_connection_l);
+    connections_.push_back(std::make_pair(boost::get_system_time(), new_connection_l));
     LOG_PRINT_L2("connections_ size now " << connections_.size());
     connections_mutex.unlock();
     boost::asio::ip::tcp::socket& sock_ = new_connection_l->socket();
@@ -1022,7 +1039,7 @@ POP_WARNINGS
     TRY_ENTRY();
     connection_ptr new_connection_l(new connection<t_protocol_handler>(io_service_, m_config, m_sock_count, m_sock_number, m_pfilter, m_connection_type) );
     connections_mutex.lock();
-    connections_.push_back(new_connection_l);
+    connections_.push_back(std::make_pair(boost::get_system_time(), new_connection_l));
     LOG_PRINT_L2("connections_ size now " << connections_.size());
     connections_mutex.unlock();
     boost::asio::ip::tcp::socket& sock_ = new_connection_l->socket();
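The safety argument in the commit message couples two constants that live in different places: CONNECTION_CLEANUP_TIME above, and the 5-second timeout hard-coded in the async connect path. A hedged sketch of how that implicit invariant could be made explicit, assuming the hypothetical name CONNECTION_ASYNC_TIMEOUT for the latter:

// Sketch only: CONNECTION_ASYNC_TIMEOUT is a hypothetical constant standing
// in for the 5-second timeout the async connect code currently hard-codes.
#define CONNECTION_ASYNC_TIMEOUT 5    // seconds (hypothetical)
#define CONNECTION_CLEANUP_TIME 30    // seconds (added by this commit)

static_assert(CONNECTION_CLEANUP_TIME > CONNECTION_ASYNC_TIMEOUT,
              "cleanup must not drop a connection the async code may still be using");

As written, the commit relies on a comment-level convention instead; the assumption breaks silently if the async code ever starts using timeouts longer than 30 seconds.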