// Copyright (c) 2014-2018, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
//    conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
//    of conditions and the following disclaimer in the documentation and/or other
//    materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
//    used to endorse or promote products derived from this software without specific
//    prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers

#pragma once

#include <list>
#include <set>
#include <map>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/portable_binary_oarchive.hpp>
#include <boost/archive/portable_binary_iarchive.hpp>
#include <boost/serialization/version.hpp>

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/identity.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/range/adaptor/reversed.hpp>

#include "syncobj.h"
#include "net/local_ip.h"
#include "p2p_protocol_defs.h"
#include "cryptonote_config.h"
#include "net_peerlist_boost_serialization.h"

#define CURRENT_PEERLIST_STORAGE_ARCHIVE_VER 6

namespace nodetool
{

  /************************************************************************/
  /*                                                                      */
  /************************************************************************/
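  // Maintains the node's known peers in three lists: a "white" list of peers this
  // node has recently seen itself (set_peer_just_seen / append_with_peer_white), a
  // "gray" list of candidate peers learned from other nodes' peer lists
  // (merge_peerlist / append_with_peer_gray), and an "anchor" list of previously
  // connected peers (append_with_peer_anchor / get_and_empty_anchor_peerlist).
  // All public methods take m_peerlist_lock, so the lists can be used from
  // multiple threads.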
  class peerlist_manager
  {
  public:
    bool init(bool allow_local_ip);
    bool deinit();
    size_t get_white_peers_count(){CRITICAL_REGION_LOCAL(m_peerlist_lock); return m_peers_white.size();}
    size_t get_gray_peers_count(){CRITICAL_REGION_LOCAL(m_peerlist_lock); return m_peers_gray.size();}
    bool merge_peerlist(const std::vector<peerlist_entry>& outer_bs);
    bool get_peerlist_head(std::vector<peerlist_entry>& bs_head, uint32_t depth = P2P_DEFAULT_PEERS_IN_HANDSHAKE);
    bool get_peerlist_full(std::vector<peerlist_entry>& pl_gray, std::vector<peerlist_entry>& pl_white);
    bool get_white_peer_by_index(peerlist_entry& p, size_t i);
    bool get_gray_peer_by_index(peerlist_entry& p, size_t i);
    template<typename F> bool foreach(bool white, const F &f);
    bool append_with_peer_white(const peerlist_entry& pr);
    bool append_with_peer_gray(const peerlist_entry& pr);
    bool append_with_peer_anchor(const anchor_peerlist_entry& ple);
    bool set_peer_just_seen(peerid_type peer, const epee::net_utils::network_address& addr, uint32_t pruning_seed);
    bool set_peer_unreachable(const peerlist_entry& pr);
    bool is_host_allowed(const epee::net_utils::network_address &address);
    bool get_random_gray_peer(peerlist_entry& pe);
    bool remove_from_peer_gray(const peerlist_entry& pe);
    bool get_and_empty_anchor_peerlist(std::vector<anchor_peerlist_entry>& apl);
    bool remove_from_peer_anchor(const epee::net_utils::network_address& addr);
    bool remove_from_peer_white(const peerlist_entry& pe);

  private:
    struct by_time{};
    struct by_id{};
    struct by_addr{};

    struct modify_all_but_id
    {
      modify_all_but_id(const peerlist_entry& ple):m_ple(ple){}
      void operator()(peerlist_entry& e)
      {
        e.id = m_ple.id;
      }
    private:
      const peerlist_entry& m_ple;
    };

    struct modify_all
    {
      modify_all(const peerlist_entry& ple):m_ple(ple){}
      void operator()(peerlist_entry& e)
      {
        e = m_ple;
      }
    private:
      const peerlist_entry& m_ple;
    };

    struct modify_last_seen
    {
      modify_last_seen(time_t last_seen):m_last_seen(last_seen){}
      void operator()(peerlist_entry& e)
      {
        e.last_seen = m_last_seen;
      }
    private:
      time_t m_last_seen;
    };

    typedef boost::multi_index_container<
      peerlist_entry,
      boost::multi_index::indexed_by<
        // access by peerlist_entry::net_address
        boost::multi_index::ordered_unique<boost::multi_index::tag<by_addr>, boost::multi_index::member<peerlist_entry,epee::net_utils::network_address,&peerlist_entry::adr> >,
        // sort by peerlist_entry::last_seen
        boost::multi_index::ordered_non_unique<boost::multi_index::tag<by_time>, boost::multi_index::member<peerlist_entry,int64_t,&peerlist_entry::last_seen> >
      >
    > peers_indexed;

    typedef boost::multi_index_container<
      peerlist_entry,
      boost::multi_index::indexed_by<
        // access by peerlist_entry::id
        boost::multi_index::ordered_unique<boost::multi_index::tag<by_id>, boost::multi_index::member<peerlist_entry,uint64_t,&peerlist_entry::id> >,
        // access by peerlist_entry::net_address
        boost::multi_index::ordered_unique<boost::multi_index::tag<by_addr>, boost::multi_index::member<peerlist_entry,epee::net_utils::network_address,&peerlist_entry::adr> >,
        // sort by peerlist_entry::last_seen
        boost::multi_index::ordered_non_unique<boost::multi_index::tag<by_time>, boost::multi_index::member<peerlist_entry,int64_t,&peerlist_entry::last_seen> >
      >
    > peers_indexed_old;

    typedef boost::multi_index_container<
      anchor_peerlist_entry,
      boost::multi_index::indexed_by<
        // access by anchor_peerlist_entry::net_address
        boost::multi_index::ordered_unique<boost::multi_index::tag<by_addr>, boost::multi_index::member<anchor_peerlist_entry,epee::net_utils::network_address,&anchor_peerlist_entry::adr> >,
        // sort by anchor_peerlist_entry::first_seen
        boost::multi_index::ordered_non_unique<boost::multi_index::tag<by_time>, boost::multi_index::member<anchor_peerlist_entry,int64_t,&anchor_peerlist_entry::first_seen> >
      >
    > anchor_peers_indexed;

  public:
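
    // Hand-rolled (de)serialization of a peer container: writes an element count
    // followed by the entries, and rebuilds the container on load. Used by
    // serialize() below instead of archiving the multi_index containers directly
    // (see the disabled #if 0 branch there).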
    template <class Archive, class List, class Element, class t_version_type>
    void serialize_peers(Archive &a, List &list, Element ple, const t_version_type ver)
    {
      if (typename Archive::is_saving())
      {
        uint64_t size = list.size();
        a & size;
        for (auto p: list)
        {
          a & p;
        }
      }
      else
      {
        uint64_t size;
        a & size;
        list.clear();
        while (size--)
        {
          a & ple;
          list.insert(ple);
        }
      }
    }

    template <class Archive, class t_version_type>
    void serialize(Archive &a, const t_version_type ver)
    {
      // at v6, we drop existing peerlists, because of an annoying format change
      if (ver < 6)
        return;

      CRITICAL_REGION_LOCAL(m_peerlist_lock);

#if 0
      // trouble loading more than one peer, can't find why
      a & m_peers_white;
      a & m_peers_gray;
      a & m_peers_anchor;
#else
      serialize_peers(a, m_peers_white, peerlist_entry(), ver);
      serialize_peers(a, m_peers_gray, peerlist_entry(), ver);
      serialize_peers(a, m_peers_anchor, anchor_peerlist_entry(), ver);
#endif
    }
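
    // Illustrative sketch only (the actual call site lives in the p2p layer, not
    // in this header): the peer lists are persisted through Boost serialization,
    // roughly along the lines of
    //   std::ofstream out(path, std::ios::binary);
    //   portable_binary_oarchive ar(out);
    //   ar << plm; // drives serialize() above, tagged with CURRENT_PEERLIST_STORAGE_ARCHIVE_VER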

  private:
    bool peers_indexed_from_old(const peers_indexed_old& pio, peers_indexed& pi);
    void trim_white_peerlist();
    void trim_gray_peerlist();

    friend class boost::serialization::access;
    epee::critical_section m_peerlist_lock;
    std::string m_config_folder;
    bool m_allow_local_ip;

    peers_indexed m_peers_gray;
    peers_indexed m_peers_white;
    anchor_peers_indexed m_peers_anchor;
  };
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::init(bool allow_local_ip)
  {
    m_allow_local_ip = allow_local_ip;
    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::deinit()
  {
    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::peers_indexed_from_old(const peers_indexed_old& pio, peers_indexed& pi)
  {
    for(auto x: pio)
    {
      auto by_addr_it = pi.get<by_addr>().find(x.adr);
      if(by_addr_it == pi.get<by_addr>().end())
      {
        pi.insert(x);
      }
    }

    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline void peerlist_manager::trim_gray_peerlist()
  {
    while(m_peers_gray.size() > P2P_LOCAL_GRAY_PEERLIST_LIMIT)
    {
      peers_indexed::index<by_time>::type& sorted_index=m_peers_gray.get<by_time>();
      sorted_index.erase(sorted_index.begin());
    }
  }
  //--------------------------------------------------------------------------------------------------
  inline void peerlist_manager::trim_white_peerlist()
  {
    while(m_peers_white.size() > P2P_LOCAL_WHITE_PEERLIST_LIMIT)
    {
      peers_indexed::index<by_time>::type& sorted_index=m_peers_white.get<by_time>();
      sorted_index.erase(sorted_index.begin());
    }
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::merge_peerlist(const std::vector<peerlist_entry>& outer_bs)
  {
    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    for(const peerlist_entry& be: outer_bs)
    {
      append_with_peer_gray(be);
    }
    // delete extra elements
    trim_gray_peerlist();
    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::get_white_peer_by_index(peerlist_entry& p, size_t i)
  {
    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    if(i >= m_peers_white.size())
      return false;

    peers_indexed::index<by_time>::type& by_time_index = m_peers_white.get<by_time>();
    p = *epee::misc_utils::move_it_backward(--by_time_index.end(), i);
    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::get_gray_peer_by_index(peerlist_entry& p, size_t i)
  {
    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    if(i >= m_peers_gray.size())
      return false;

    peers_indexed::index<by_time>::type& by_time_index = m_peers_gray.get<by_time>();
    p = *epee::misc_utils::move_it_backward(--by_time_index.end(), i);
    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::is_host_allowed(const epee::net_utils::network_address &address)
  {
    //never allow loopback ip
    if(address.is_loopback())
      return false;

    if(!m_allow_local_ip && address.is_local())
      return false;

    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::get_peerlist_head(std::vector<peerlist_entry>& bs_head, uint32_t depth)
  {
    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    peers_indexed::index<by_time>::type& by_time_index=m_peers_white.get<by_time>();
    uint32_t cnt = 0;
    bs_head.reserve(depth);
    for(const peers_indexed::value_type& vl: boost::adaptors::reverse(by_time_index))
    {
      if(!vl.last_seen)
        continue;

      if(cnt++ >= depth)
        break;

      bs_head.push_back(vl);
    }
    return true;
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::get_peerlist_full(std::vector<peerlist_entry>& pl_gray, std::vector<peerlist_entry>& pl_white)
  {
    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    peers_indexed::index<by_time>::type& by_time_index_gr=m_peers_gray.get<by_time>();
    pl_gray.reserve(pl_gray.size() + by_time_index_gr.size());
    for(const peers_indexed::value_type& vl: boost::adaptors::reverse(by_time_index_gr))
    {
      pl_gray.push_back(vl);
    }

    peers_indexed::index<by_time>::type& by_time_index_wt=m_peers_white.get<by_time>();
    pl_white.reserve(pl_white.size() + by_time_index_wt.size());
    for(const peers_indexed::value_type& vl: boost::adaptors::reverse(by_time_index_wt))
    {
      pl_white.push_back(vl);
    }

    return true;
  }
  //--------------------------------------------------------------------------------------------------
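  // Visits the white or gray peers from most recently seen to least recently seen,
  // stopping early (and returning false) as soon as the callback returns false.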
  template<typename F> inline
  bool peerlist_manager::foreach(bool white, const F &f)
  {
    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    peers_indexed::index<by_time>::type& by_time_index = white ? m_peers_white.get<by_time>() : m_peers_gray.get<by_time>();
    for(const peers_indexed::value_type& vl: boost::adaptors::reverse(by_time_index))
      if (!f(vl))
        return false;
    return true;
  }
  //--------------------------------------------------------------------------------------------------
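  // Records that a peer was just seen alive: builds a fresh entry with the current
  // time and the peer's pruning seed, then promotes/updates it in the white list.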
  inline
  bool peerlist_manager::set_peer_just_seen(peerid_type peer, const epee::net_utils::network_address& addr, uint32_t pruning_seed)
  {
    TRY_ENTRY();
    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    //find in white list
    peerlist_entry ple;
    ple.adr = addr;
    ple.id = peer;
    ple.last_seen = time(NULL);
    ple.pruning_seed = pruning_seed;
    return append_with_peer_white(ple);
    CATCH_ENTRY_L0("peerlist_manager::set_peer_just_seen()", false);
  }
  //--------------------------------------------------------------------------------------------------
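  // Inserts or refreshes a peer in the white list (silently ignoring disallowed
  // hosts) and removes any matching entry from the gray list.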
  inline
  bool peerlist_manager::append_with_peer_white(const peerlist_entry& ple)
  {
    TRY_ENTRY();
    if(!is_host_allowed(ple.adr))
      return true;

    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    //find in white list
    auto by_addr_it_wt = m_peers_white.get<by_addr>().find(ple.adr);
    if(by_addr_it_wt == m_peers_white.get<by_addr>().end())
    {
      //put new record into white list
      m_peers_white.insert(ple);
      trim_white_peerlist();
    }else
    {
      //update record in white list
      m_peers_white.replace(by_addr_it_wt, ple);
    }
    //remove from gray list, if needed
    auto by_addr_it_gr = m_peers_gray.get<by_addr>().find(ple.adr);
    if(by_addr_it_gr != m_peers_gray.get<by_addr>().end())
    {
      m_peers_gray.erase(by_addr_it_gr);
    }
    return true;
    CATCH_ENTRY_L0("peerlist_manager::append_with_peer_white()", false);
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::append_with_peer_gray(const peerlist_entry& ple)
  {
    TRY_ENTRY();
    if(!is_host_allowed(ple.adr))
      return true;

    CRITICAL_REGION_LOCAL(m_peerlist_lock);
    //find in white list
    auto by_addr_it_wt = m_peers_white.get<by_addr>().find(ple.adr);
    if(by_addr_it_wt != m_peers_white.get<by_addr>().end())
      return true;

    //update gray list
    auto by_addr_it_gr = m_peers_gray.get<by_addr>().find(ple.adr);
    if(by_addr_it_gr == m_peers_gray.get<by_addr>().end())
    {
      //put new record into gray list
      m_peers_gray.insert(ple);
      trim_gray_peerlist();
    }else
    {
      //update record in gray list
      m_peers_gray.replace(by_addr_it_gr, ple);
    }
    return true;
    CATCH_ENTRY_L0("peerlist_manager::append_with_peer_gray()", false);
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::append_with_peer_anchor(const anchor_peerlist_entry& ple)
  {
    TRY_ENTRY();

    CRITICAL_REGION_LOCAL(m_peerlist_lock);

    auto by_addr_it_anchor = m_peers_anchor.get<by_addr>().find(ple.adr);

    if(by_addr_it_anchor == m_peers_anchor.get<by_addr>().end()) {
      m_peers_anchor.insert(ple);
    }

    return true;

    CATCH_ENTRY_L0("peerlist_manager::append_with_peer_anchor()", false);
  }
  //--------------------------------------------------------------------------------------------------
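  // Picks a random entry from the gray list; returns false when the gray list
  // is empty.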
  inline
  bool peerlist_manager::get_random_gray_peer(peerlist_entry& pe)
  {
    TRY_ENTRY();

    CRITICAL_REGION_LOCAL(m_peerlist_lock);

    if (m_peers_gray.empty()) {
      return false;
    }

    size_t random_index = crypto::rand<size_t>() % m_peers_gray.size();

    peers_indexed::index<by_time>::type& by_time_index = m_peers_gray.get<by_time>();
    pe = *epee::misc_utils::move_it_backward(--by_time_index.end(), random_index);

    return true;

    CATCH_ENTRY_L0("peerlist_manager::get_random_gray_peer()", false);
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::remove_from_peer_white(const peerlist_entry& pe)
  {
    TRY_ENTRY();

    CRITICAL_REGION_LOCAL(m_peerlist_lock);

    peers_indexed::index_iterator<by_addr>::type iterator = m_peers_white.get<by_addr>().find(pe.adr);

    if (iterator != m_peers_white.get<by_addr>().end()) {
      m_peers_white.erase(iterator);
    }

    return true;

    CATCH_ENTRY_L0("peerlist_manager::remove_from_peer_white()", false);
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::remove_from_peer_gray(const peerlist_entry& pe)
  {
    TRY_ENTRY();

    CRITICAL_REGION_LOCAL(m_peerlist_lock);

    peers_indexed::index_iterator<by_addr>::type iterator = m_peers_gray.get<by_addr>().find(pe.adr);

    if (iterator != m_peers_gray.get<by_addr>().end()) {
      m_peers_gray.erase(iterator);
    }

    return true;

    CATCH_ENTRY_L0("peerlist_manager::remove_from_peer_gray()", false);
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::get_and_empty_anchor_peerlist(std::vector<anchor_peerlist_entry>& apl)
  {
    TRY_ENTRY();

    CRITICAL_REGION_LOCAL(m_peerlist_lock);

    auto begin = m_peers_anchor.get<by_time>().begin();
    auto end = m_peers_anchor.get<by_time>().end();

    std::for_each(begin, end, [&apl](const anchor_peerlist_entry &a) {
      apl.push_back(a);
    });

    m_peers_anchor.get<by_time>().clear();

    return true;

    CATCH_ENTRY_L0("peerlist_manager::get_and_empty_anchor_peerlist()", false);
  }
  //--------------------------------------------------------------------------------------------------
  inline
  bool peerlist_manager::remove_from_peer_anchor(const epee::net_utils::network_address& addr)
  {
    TRY_ENTRY();

    CRITICAL_REGION_LOCAL(m_peerlist_lock);

    anchor_peers_indexed::index_iterator<by_addr>::type iterator = m_peers_anchor.get<by_addr>().find(addr);

    if (iterator != m_peers_anchor.get<by_addr>().end()) {
      m_peers_anchor.erase(iterator);
    }

    return true;

    CATCH_ENTRY_L0("peerlist_manager::remove_from_peer_anchor()", false);
  }
  //--------------------------------------------------------------------------------------------------
}

BOOST_CLASS_VERSION(nodetool::peerlist_manager, CURRENT_PEERLIST_STORAGE_ARCHIVE_VER)