2010-08-29 16:58:15 +00:00
// Copyright (c) 2009-2010 Satoshi Nakamoto
2022-12-24 23:49:50 +00:00
// Copyright (c) 2009-2022 The Bitcoin Core developers
2014-12-13 12:09:33 +08:00
// Distributed under the MIT software license, see the accompanying
2012-05-18 22:02:28 +08:00
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
2010-08-29 16:58:15 +00:00
2013-05-27 19:55:01 -04:00
# if defined(HAVE_CONFIG_H)
2017-11-10 13:57:53 +13:00
# include <config/bitcoin-config.h>
2013-05-27 19:55:01 -04:00
# endif
2017-11-10 13:57:53 +13:00
# include <net.h>
2021-08-20 12:33:24 +02:00
# include <addrdb.h>
2021-11-30 14:49:43 +01:00
# include <addrman.h>
2017-10-05 16:40:43 -04:00
# include <banman.h>
2017-11-10 13:57:53 +13:00
# include <clientversion.h>
2023-03-23 12:23:29 +01:00
# include <common/args.h>
2022-06-28 13:27:57 +01:00
# include <compat/compat.h>
2017-11-10 13:57:53 +13:00
# include <consensus/consensus.h>
# include <crypto/sha256.h>
2020-11-24 11:28:52 +01:00
# include <i2p.h>
2023-03-06 22:01:13 +01:00
# include <logging.h>
2023-07-24 13:23:39 -04:00
# include <memusage.h>
2019-06-20 18:37:51 +09:00
# include <net_permissions.h>
2021-05-02 19:05:42 +02:00
# include <netaddress.h>
2020-06-19 18:14:17 -04:00
# include <netbase.h>
2023-03-06 22:01:13 +01:00
# include <node/eviction.h>
2022-06-14 10:38:51 +02:00
# include <node/interface_ui.h>
2020-05-10 15:47:32 +02:00
# include <protocol.h>
2019-11-23 11:42:23 -05:00
# include <random.h>
2017-11-10 13:57:53 +13:00
# include <scheduler.h>
2023-03-15 11:18:06 +01:00
# include <util/fs.h>
2021-01-04 13:02:43 +01:00
# include <util/sock.h>
2018-10-22 15:51:11 -07:00
# include <util/strencodings.h>
2021-04-13 20:44:46 +03:00
# include <util/thread.h>
2022-10-11 15:33:22 +08:00
# include <util/threadinterrupt.h>
2021-05-20 16:54:54 +02:00
# include <util/trace.h>
2019-06-17 10:56:52 +03:00
# include <util/translation.h>
2023-09-11 13:54:32 -04:00
# include <util/vector.h>
2013-04-13 00:13:08 -05:00
2011-10-07 11:02:21 -04:00
# ifdef WIN32
2011-07-02 03:59:37 +02:00
# include <string.h>
2013-07-17 16:51:40 +08:00
# endif
2021-03-26 12:53:05 +08:00
# if HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS
# include <ifaddrs.h>
# endif
2020-09-30 19:07:36 +03:00
# include <algorithm>
2021-04-20 13:22:20 +02:00
# include <array>
2019-12-29 13:04:02 -08:00
# include <cstdint>
2021-01-13 02:05:00 +01:00
# include <functional>
2021-03-15 11:59:05 +08:00
# include <optional>
2018-09-26 21:54:52 -04:00
# include <unordered_map>
2014-01-30 10:55:55 +01:00
2015-04-08 11:20:00 -07:00
# include <math.h>
2020-09-12 18:05:54 +03:00
/** Maximum number of block-relay-only anchor connections */
static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2 ;
static_assert ( MAX_BLOCK_RELAY_ONLY_ANCHORS < = static_cast < size_t > ( MAX_BLOCK_RELAY_ONLY_CONNECTIONS ) , " MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed MAX_BLOCK_RELAY_ONLY_CONNECTIONS. " ) ;
/** Anchor IP address database file name */
const char * const ANCHORS_DATABASE_FILENAME = " anchors.dat " ;
2020-03-06 18:06:50 -05:00
// How often to dump addresses to peers.dat
static constexpr std : : chrono : : minutes DUMP_PEERS_INTERVAL { 15 } ;
2017-10-05 12:46:54 -04:00
2019-03-07 15:30:59 -08:00
/** Number of DNS seeds to query when the number of connections is low. */
static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3 ;
2020-02-11 13:20:21 +10:00
/** How long to delay before querying DNS seeds
2020-05-28 10:07:49 +10:00
*
* If we have more than THRESHOLD entries in addrman , then it ' s likely
* that we got those addresses from having previously connected to the P2P
* network , and that we ' ll be able to successfully reconnect to the P2P
* network via contacting one of them . So if that ' s the case , spend a
* little longer trying to connect to known peers before querying the
* DNS seeds .
2020-02-11 13:20:21 +10:00
*/
2020-05-28 10:07:49 +10:00
static constexpr std : : chrono : : seconds DNSSEEDS_DELAY_FEW_PEERS { 11 } ;
static constexpr std : : chrono : : minutes DNSSEEDS_DELAY_MANY_PEERS { 5 } ;
static constexpr int DNSSEEDS_DELAY_PEER_THRESHOLD = 1000 ; // "many" vs "few" peers
2020-02-11 13:20:21 +10:00
2020-10-24 16:33:26 +08:00
/** The default timeframe for -maxuploadtarget. 1 day. */
2020-10-24 19:13:42 +08:00
static constexpr std : : chrono : : seconds MAX_UPLOAD_TIMEFRAME { 60 * 60 * 24 } ;
2020-10-24 16:33:26 +08:00
2022-05-10 09:08:49 +02:00
// A random time period (0 to 1 seconds) is added to feeler connections to prevent synchronization.
static constexpr auto FEELER_SLEEP_WINDOW { 1 s } ;
2016-06-17 00:10:07 -04:00
2023-02-14 17:40:14 -05:00
/** Frequency to attempt extra connections to reachable networks we're not connected to yet **/
static constexpr auto EXTRA_NETWORK_PEER_INTERVAL { 5 min } ;
2017-06-01 12:34:02 +02:00
/** Used to pass flags to the Bind() function */
enum BindFlags {
BF_NONE = 0 ,
2022-07-29 13:23:29 +02:00
BF_REPORT_ERROR = ( 1U < < 0 ) ,
2020-09-29 18:03:43 +03:00
/**
* Do not call AddLocal ( ) for our special addresses , e . g . , for incoming
* Tor connections , to prevent gossiping them over the network .
*/
2022-07-29 13:23:29 +02:00
BF_DONT_ADVERTISE = ( 1U < < 1 ) ,
2017-06-01 12:34:02 +02:00
} ;
2018-10-29 16:30:30 -04:00
// The set of sockets cannot be modified while waiting
// The sleep time needs to be small to avoid new sockets stalling
static const uint64_t SELECT_TIMEOUT_MILLISECONDS = 50 ;
2022-04-07 17:13:52 +05:30
const std : : string NET_MESSAGE_TYPE_OTHER = " *other* " ;
2015-08-25 16:30:31 +02:00
2016-09-09 12:48:10 +02:00
static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL ; // SHA256("netgroup")[0:8]
2016-10-26 15:10:15 -04:00
static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL ; // SHA256("localhostnonce")[0:8]
2020-08-11 12:41:26 +03:00
static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL ; // SHA256("addrcache")[0:8]
2010-08-29 16:58:15 +00:00
//
// Global state variables
//
2012-05-24 19:02:21 +02:00
bool fDiscover = true ;
2014-05-29 12:33:17 +02:00
bool fListen = true ;
2022-04-20 17:10:13 +10:00
GlobalMutex g_maplocalhost_mutex ;
2022-01-19 07:04:52 -03:00
std : : map < CNetAddr , LocalServiceInfo > mapLocalHost GUARDED_BY ( g_maplocalhost_mutex ) ;
static bool vfLimited [ NET_MAX ] GUARDED_BY ( g_maplocalhost_mutex ) = { } ;
2015-07-31 18:05:42 +02:00
std : : string strSubVersion ;
2010-08-29 16:58:15 +00:00
2023-07-24 13:23:39 -04:00
size_t CSerializedNetMsg : : GetMemoryUsage ( ) const noexcept
{
// Don't count the dynamic memory used for the m_type string, by assuming it fits in the
// "small string" optimization area (which stores data inside the object itself, up to some
// size; 15 bytes in modern libstdc++).
return sizeof ( * this ) + memusage : : DynamicUsage ( data ) ;
}
2020-07-17 14:56:34 -07:00
void CConnman : : AddAddrFetch ( const std : : string & strDest )
2012-04-24 02:15:00 +02:00
{
2020-07-17 14:56:34 -07:00
LOCK ( m_addr_fetches_mutex ) ;
m_addr_fetches . push_back ( strDest ) ;
2012-04-24 02:15:00 +02:00
}
2019-12-29 13:04:02 -08:00
uint16_t GetListenPort ( )
2011-04-21 10:45:08 -04:00
{
2020-10-18 14:45:35 +02:00
// If -bind= is provided with ":port" part, use that (first one if multiple are provided).
for ( const std : : string & bind_arg : gArgs . GetArgs ( " -bind " ) ) {
constexpr uint16_t dummy_port = 0 ;
2022-10-10 15:02:59 -03:00
const std : : optional < CService > bind_addr { Lookup ( bind_arg , dummy_port , /*fAllowLookup=*/ false ) } ;
if ( bind_addr . has_value ( ) & & bind_addr - > GetPort ( ) ! = dummy_port ) return bind_addr - > GetPort ( ) ;
2020-10-18 14:45:35 +02:00
}
// Otherwise, if -whitebind= without NetPermissionFlags::NoBan is provided, use that
// (-whitebind= is required to have ":port").
for ( const std : : string & whitebind_arg : gArgs . GetArgs ( " -whitebind " ) ) {
NetWhitebindPermissions whitebind ;
bilingual_str error ;
if ( NetWhitebindPermissions : : TryParse ( whitebind_arg , whitebind , error ) ) {
if ( ! NetPermissions : : HasFlag ( whitebind . m_flags , NetPermissionFlags : : NoBan ) ) {
return whitebind . m_service . GetPort ( ) ;
}
}
}
// Otherwise, if -port= is provided, use that. Otherwise use the default port.
2019-08-22 21:40:41 -04:00
return static_cast < uint16_t > ( gArgs . GetIntArg ( " -port " , Params ( ) . GetDefaultPort ( ) ) ) ;
2011-04-21 10:45:08 -04:00
}
2010-08-29 16:58:15 +00:00
2023-07-13 12:38:06 -06:00
// Determine the "best" local address for a particular peer.
[[nodiscard]] static std : : optional < CService > GetLocal ( const CNode & peer )
2012-02-12 13:45:24 +01:00
{
2023-07-13 12:38:06 -06:00
if ( ! fListen ) return std : : nullopt ;
2010-08-29 16:58:15 +00:00
2023-07-13 12:38:06 -06:00
std : : optional < CService > addr ;
2012-05-13 00:41:24 +02:00
int nBestScore = - 1 ;
2012-02-12 13:45:24 +01:00
int nBestReachability = - 1 ;
{
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2023-07-18 10:07:41 -06:00
for ( const auto & [ local_addr , local_service_info ] : mapLocalHost ) {
2023-04-11 15:27:13 -04:00
// For privacy reasons, don't advertise our privacy-network address
// to other networks and don't advertise our other-network address
// to privacy networks.
2023-07-14 06:51:21 -06:00
if ( local_addr . GetNetwork ( ) ! = peer . ConnectedThroughNetwork ( )
& & ( local_addr . IsPrivacyNet ( ) | | peer . IsConnectedThroughPrivacyNet ( ) ) ) {
2023-04-11 15:27:13 -04:00
continue ;
}
2023-07-18 10:07:41 -06:00
const int nScore { local_service_info . nScore } ;
const int nReachability { local_addr . GetReachabilityFrom ( peer . addr ) } ;
if ( nReachability > nBestReachability | | ( nReachability = = nBestReachability & & nScore > nBestScore ) ) {
2023-07-13 12:38:06 -06:00
addr . emplace ( CService { local_addr , local_service_info . nPort } ) ;
2012-02-12 13:45:24 +01:00
nBestReachability = nReachability ;
2012-05-13 00:41:24 +02:00
nBestScore = nScore ;
2012-02-12 13:45:24 +01:00
}
}
}
2023-07-13 12:38:06 -06:00
return addr ;
2012-02-12 13:45:24 +01:00
}
2010-08-29 16:58:15 +00:00
2021-03-31 13:29:24 +02:00
//! Convert the serialized seeds into usable address objects.
static std : : vector < CAddress > ConvertSeeds ( const std : : vector < uint8_t > & vSeedsIn )
2015-01-23 23:40:50 -05:00
{
// It'll only connect to one or two seed nodes because once it connects,
// it'll get a pile of addresses with newer timestamps.
// Seed nodes are given a random 'last seen time' of between one and two
// weeks ago.
2022-03-28 14:20:04 +02:00
const auto one_week { 7 * 24 h } ;
2015-01-23 23:40:50 -05:00
std : : vector < CAddress > vSeedsOut ;
2018-10-31 15:02:24 -07:00
FastRandomContext rng ;
2023-01-31 18:04:44 +01:00
DataStream underlying_stream { vSeedsIn } ;
ParamsStream s { CAddress : : V2_NETWORK , underlying_stream } ;
2021-03-31 13:29:24 +02:00
while ( ! s . eof ( ) ) {
CService endpoint ;
s > > endpoint ;
CAddress addr { endpoint , GetDesirableServiceFlags ( NODE_NONE ) } ;
2022-03-28 14:20:04 +02:00
addr . nTime = rng . rand_uniform_delay ( Now < NodeSeconds > ( ) - one_week , - one_week ) ;
2022-07-15 14:13:39 +02:00
LogPrint ( BCLog : : NET , " Added hardcoded seed: %s \n " , addr . ToStringAddrPort ( ) ) ;
2015-01-23 23:40:50 -05:00
vSeedsOut . push_back ( addr ) ;
}
return vSeedsOut ;
}
2023-07-13 12:38:06 -06:00
// Determine the "best" local address for a particular peer.
// If none, return the unroutable 0.0.0.0 but filled in with
2014-07-20 23:32:25 -07:00
// the normal parameters, since the IP may be changed to a useful
// one by discovery.
2023-04-11 15:09:31 -04:00
CService GetLocalAddress ( const CNode & peer )
2012-02-12 13:45:24 +01:00
{
2023-07-13 12:38:06 -06:00
return GetLocal ( peer ) . value_or ( CService { CNetAddr ( ) , GetListenPort ( ) } ) ;
2012-02-12 13:45:24 +01:00
}
2010-08-29 16:58:15 +00:00
2018-05-02 17:14:48 +02:00
static int GetnScore ( const CService & addr )
2012-02-12 13:45:24 +01:00
{
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2021-09-06 00:10:31 +02:00
const auto it = mapLocalHost . find ( addr ) ;
return ( it ! = mapLocalHost . end ( ) ) ? it - > second . nScore : 0 ;
2014-07-20 23:32:25 -07:00
}
// Is our peer's addrLocal potentially useful as an external IP source?
2023-07-13 13:01:54 -06:00
[[nodiscard]] static bool IsPeerAddrLocalGood ( CNode * pnode )
2014-07-20 23:32:25 -07:00
{
2017-02-06 12:18:51 -05:00
CService addrLocal = pnode - > GetAddrLocal ( ) ;
return fDiscover & & pnode - > addr . IsRoutable ( ) & & addrLocal . IsRoutable ( ) & &
2019-01-09 16:41:37 -08:00
IsReachable ( addrLocal . GetNetwork ( ) ) ;
2014-07-20 23:32:25 -07:00
}
2022-07-04 18:02:28 +02:00
std : : optional < CService > GetLocalAddrForPeer ( CNode & node )
2014-07-20 23:32:25 -07:00
{
2023-04-11 15:09:31 -04:00
CService addrLocal { GetLocalAddress ( node ) } ;
2020-07-09 07:52:48 +01:00
if ( gArgs . GetBoolArg ( " -addrmantest " , false ) ) {
// use IPv4 loopback during addrmantest
2022-07-04 18:02:28 +02:00
addrLocal = CService ( LookupNumeric ( " 127.0.0.1 " , GetListenPort ( ) ) ) ;
2020-07-09 07:52:48 +01:00
}
// If discovery is enabled, sometimes give our peer the address it
// tells us that it sees us as in case it has a better idea of our
// address than we do.
FastRandomContext rng ;
2022-07-04 18:02:28 +02:00
if ( IsPeerAddrLocalGood ( & node ) & & ( ! addrLocal . IsRoutable ( ) | |
2020-07-09 07:52:48 +01:00
rng . randbits ( ( GetnScore ( addrLocal ) > LOCAL_MANUAL ) ? 3 : 1 ) = = 0 ) )
2012-02-12 13:45:24 +01:00
{
2022-07-04 18:02:28 +02:00
if ( node . IsInboundConn ( ) ) {
2020-10-18 14:45:35 +02:00
// For inbound connections, assume both the address and the port
// as seen from the peer.
2022-07-04 18:02:28 +02:00
addrLocal = CService { node . GetAddrLocal ( ) } ;
2020-10-18 14:45:35 +02:00
} else {
// For outbound connections, assume just the address as seen from
// the peer and leave the port in `addrLocal` as returned by
// `GetLocalAddress()` above. The peer has no way to observe our
// listening port when we have initiated the connection.
2022-07-04 18:02:28 +02:00
addrLocal . SetIP ( node . GetAddrLocal ( ) ) ;
2020-10-18 14:45:35 +02:00
}
2020-07-09 07:52:48 +01:00
}
if ( addrLocal . IsRoutable ( ) | | gArgs . GetBoolArg ( " -addrmantest " , false ) )
{
2022-07-15 14:13:39 +02:00
LogPrint ( BCLog : : NET , " Advertising address %s to peer=%d \n " , addrLocal . ToStringAddrPort ( ) , node . GetId ( ) ) ;
2020-07-09 07:52:48 +01:00
return addrLocal ;
2012-02-12 13:45:24 +01:00
}
2020-07-09 07:42:11 +01:00
// Address is unroutable. Don't advertise.
2021-03-15 10:41:30 +08:00
return std : : nullopt ;
2012-02-12 13:45:24 +01:00
}
2021-09-13 13:02:05 +02:00
/**
 * If an IPv6 address belongs to the address range used by the CJDNS network and
 * the CJDNS network is reachable (-cjdnsreachable config is set), then change
 * the type from NET_IPV6 to NET_CJDNS.
 * @param[in] service Address to potentially convert.
 * @return a copy of `service` either unmodified or changed to CJDNS.
 */
CService MaybeFlipIPv6toCJDNS(const CService& service)
{
    CService ret{service};
    if (ret.IsIPv6() && ret.HasCJDNSPrefix() && IsReachable(NET_CJDNS)) {
        ret.m_net = NET_CJDNS;
    }
    return ret;
}
2012-02-12 13:45:24 +01:00
// learn a new local address
2021-09-13 13:02:05 +02:00
bool AddLocal ( const CService & addr_ , int nScore )
2012-02-12 13:45:24 +01:00
{
2021-09-13 13:02:05 +02:00
CService addr { MaybeFlipIPv6toCJDNS ( addr_ ) } ;
2012-02-12 13:45:24 +01:00
if ( ! addr . IsRoutable ( ) )
return false ;
2012-05-24 19:02:21 +02:00
if ( ! fDiscover & & nScore < LOCAL_MANUAL )
2012-05-13 14:11:53 +02:00
return false ;
2019-01-09 16:41:37 -08:00
if ( ! IsReachable ( addr ) )
2012-05-13 15:11:51 +02:00
return false ;
2022-07-15 14:13:39 +02:00
LogPrintf ( " AddLocal(%s,%i) \n " , addr . ToStringAddrPort ( ) , nScore ) ;
2012-02-12 13:45:24 +01:00
{
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2021-09-06 00:10:31 +02:00
const auto [ it , is_newly_added ] = mapLocalHost . emplace ( addr , LocalServiceInfo ( ) ) ;
LocalServiceInfo & info = it - > second ;
if ( is_newly_added | | nScore > = info . nScore ) {
info . nScore = nScore + ( is_newly_added ? 0 : 1 ) ;
2012-08-29 02:33:25 +02:00
info . nPort = addr . GetPort ( ) ;
2012-05-13 00:41:24 +02:00
}
2012-02-12 13:45:24 +01:00
}
return true ;
}
2012-05-13 01:26:14 +02:00
bool AddLocal ( const CNetAddr & addr , int nScore )
2012-05-10 20:35:13 +02:00
{
2012-05-13 01:26:14 +02:00
return AddLocal ( CService ( addr , GetListenPort ( ) ) , nScore ) ;
2012-05-10 20:35:13 +02:00
}
2018-07-27 08:22:42 +02:00
void RemoveLocal ( const CService & addr )
2015-09-08 17:48:45 +02:00
{
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2022-07-15 14:13:39 +02:00
LogPrintf ( " RemoveLocal(%s) \n " , addr . ToStringAddrPort ( ) ) ;
2015-09-08 17:48:45 +02:00
mapLocalHost . erase ( addr ) ;
}
2019-01-09 16:41:37 -08:00
void SetReachable ( enum Network net , bool reachable )
2012-05-04 16:46:22 +02:00
{
2017-05-23 20:04:38 -04:00
if ( net = = NET_UNROUTABLE | | net = = NET_INTERNAL )
2012-05-14 17:15:58 +02:00
return ;
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2019-01-09 16:41:37 -08:00
vfLimited [ net ] = ! reachable ;
2012-05-04 16:46:22 +02:00
}
2019-01-09 16:41:37 -08:00
bool IsReachable ( enum Network net )
2012-05-04 16:46:22 +02:00
{
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2019-01-09 16:41:37 -08:00
return ! vfLimited [ net ] ;
2012-05-14 17:15:58 +02:00
}
2019-01-09 16:41:37 -08:00
bool IsReachable ( const CNetAddr & addr )
2012-05-14 17:15:58 +02:00
{
2019-01-09 16:41:37 -08:00
return IsReachable ( addr . GetNetwork ( ) ) ;
2012-05-04 16:46:22 +02:00
}
/** vote for a local address */
2012-05-10 20:35:13 +02:00
bool SeenLocal ( const CService & addr )
2012-02-12 13:45:24 +01:00
{
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2021-09-06 00:10:31 +02:00
const auto it = mapLocalHost . find ( addr ) ;
if ( it = = mapLocalHost . end ( ) ) return false ;
+ + it - > second . nScore ;
2012-02-12 13:45:24 +01:00
return true ;
}
2014-07-20 23:32:25 -07:00
2012-05-04 16:46:22 +02:00
/** check whether a given address is potentially local */
2012-05-10 20:35:13 +02:00
bool IsLocal ( const CService & addr )
2012-02-12 13:45:24 +01:00
{
2022-01-19 07:04:52 -03:00
LOCK ( g_maplocalhost_mutex ) ;
2012-02-12 13:45:24 +01:00
return mapLocalHost . count ( addr ) > 0 ;
}
2010-08-29 16:58:15 +00:00
2016-04-16 19:13:12 -04:00
// Find a connected node whose IP (ignoring port) matches `ip`.
CNode* CConnman::FindNode(const CNetAddr& ip)
{
    LOCK(m_nodes_mutex);
    for (CNode* node : m_nodes) {
        if (static_cast<CNetAddr>(node->addr) == ip) return node;
    }
    return nullptr;
}
2016-04-16 19:13:12 -04:00
// Find a connected node whose address falls inside `subNet`.
CNode* CConnman::FindNode(const CSubNet& subNet)
{
    LOCK(m_nodes_mutex);
    for (CNode* node : m_nodes) {
        if (subNet.Match(static_cast<CNetAddr>(node->addr))) return node;
    }
    return nullptr;
}
2016-04-16 19:13:12 -04:00
// Find a connected node by its original address string (e.g. a hostname).
CNode* CConnman::FindNode(const std::string& addrName)
{
    LOCK(m_nodes_mutex);
    for (CNode* node : m_nodes) {
        if (node->m_addr_name == addrName) return node;
    }
    return nullptr;
}
2016-04-16 19:13:12 -04:00
// Find a connected node matching `addr` exactly (IP and port).
CNode* CConnman::FindNode(const CService& addr)
{
    LOCK(m_nodes_mutex);
    for (CNode* node : m_nodes) {
        if (static_cast<CService>(node->addr) == addr) return node;
    }
    return nullptr;
}
2020-10-16 11:10:17 -04:00
bool CConnman : : AlreadyConnectedToAddress ( const CAddress & addr )
{
2022-07-18 13:28:40 +02:00
return FindNode ( static_cast < CNetAddr > ( addr ) ) | | FindNode ( addr . ToStringAddrPort ( ) ) ;
2020-10-16 11:10:17 -04:00
}
2016-04-17 20:21:58 -04:00
bool CConnman : : CheckIncomingNonce ( uint64_t nonce )
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * pnode : m_nodes ) {
2020-07-28 13:39:38 -07:00
if ( ! pnode - > fSuccessfullyConnected & & ! pnode - > IsInboundConn ( ) & & pnode - > GetLocalNonce ( ) = = nonce )
2016-04-17 20:21:58 -04:00
return false ;
}
return true ;
}
2017-05-30 11:59:42 +02:00
/** Get the bind address for a socket as CAddress */
2021-04-13 15:11:20 +02:00
static CAddress GetBindAddress ( const Sock & sock )
2017-05-30 11:59:42 +02:00
{
CAddress addr_bind ;
struct sockaddr_storage sockaddr_bind ;
socklen_t sockaddr_bind_len = sizeof ( sockaddr_bind ) ;
2021-04-13 15:11:20 +02:00
if ( sock . Get ( ) ! = INVALID_SOCKET ) {
if ( ! sock . GetSockName ( ( struct sockaddr * ) & sockaddr_bind , & sockaddr_bind_len ) ) {
2017-05-30 11:59:42 +02:00
addr_bind . SetSockAddr ( ( const struct sockaddr * ) & sockaddr_bind ) ;
} else {
2022-05-25 11:31:58 +02:00
LogPrintLevel ( BCLog : : NET , BCLog : : Level : : Warning , " getsockname failed \n " ) ;
2017-05-30 11:59:42 +02:00
}
}
return addr_bind ;
}
2023-08-21 16:55:47 -04:00
CNode* CConnman::ConnectNode(CAddress addrConnect, const char* pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport)
{
    AssertLockNotHeld(m_unused_i2p_sessions_mutex);
    assert(conn_type != ConnectionType::INBOUND);

    if (pszDest == nullptr) {
        if (IsLocal(addrConnect)) return nullptr;

        // Look for an existing connection
        CNode* pnode = FindNode(static_cast<CService>(addrConnect));
        if (pnode) {
            LogPrintf("Failed to open new connection, already connected\n");
            return nullptr;
        }
    }

    LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying %s connection %s lastseen=%.1fhrs\n",
                  use_v2transport ? "v2" : "v1",
                  pszDest ? pszDest : addrConnect.ToStringAddrPort(),
                  Ticks<HoursDouble>(pszDest ? 0h : Now<NodeSeconds>() - addrConnect.nTime));

    // Resolve
    const uint16_t default_port{pszDest != nullptr ? GetDefaultPort(pszDest) :
                                                     m_params.GetDefaultPort()};
    if (pszDest) {
        const std::vector<CService> resolved{Lookup(pszDest, default_port, fNameLookup && !HaveNameProxy(), 256)};
        if (!resolved.empty()) {
            // Pick one of the resolved endpoints at random.
            const CService& rnd{resolved[GetRand(resolved.size())]};
            addrConnect = CAddress{MaybeFlipIPv6toCJDNS(rnd), NODE_NONE};
            if (!addrConnect.IsValid()) {
                LogPrint(BCLog::NET, "Resolver returned invalid address %s for %s\n", addrConnect.ToStringAddrPort(), pszDest);
                return nullptr;
            }
            // It is possible that we already have a connection to the IP/port pszDest resolved to.
            // In that case, drop the connection that was just created.
            LOCK(m_nodes_mutex);
            CNode* pnode = FindNode(static_cast<CService>(addrConnect));
            if (pnode) {
                LogPrintf("Failed to open new connection, already connected\n");
                return nullptr;
            }
        }
    }

    // Connect
    bool connected = false;
    std::unique_ptr<Sock> sock;
    Proxy proxy;
    CAddress addr_bind;
    assert(!addr_bind.IsValid());
    std::unique_ptr<i2p::sam::Session> i2p_transient_session;

    if (addrConnect.IsValid()) {
        const bool use_proxy{GetProxy(addrConnect.GetNetwork(), proxy)};
        bool proxyConnectionFailed = false;

        if (addrConnect.IsI2P() && use_proxy) {
            i2p::Connection conn;

            if (m_i2p_sam_session) {
                connected = m_i2p_sam_session->Connect(addrConnect, conn, proxyConnectionFailed);
            } else {
                // No persistent session: grab a cached transient session or
                // create a fresh one.
                {
                    LOCK(m_unused_i2p_sessions_mutex);
                    if (m_unused_i2p_sessions.empty()) {
                        i2p_transient_session =
                            std::make_unique<i2p::sam::Session>(proxy.proxy, &interruptNet);
                    } else {
                        i2p_transient_session.swap(m_unused_i2p_sessions.front());
                        m_unused_i2p_sessions.pop();
                    }
                }
                connected = i2p_transient_session->Connect(addrConnect, conn, proxyConnectionFailed);
                if (!connected) {
                    // Return the unused session to the cache for reuse.
                    LOCK(m_unused_i2p_sessions_mutex);
                    if (m_unused_i2p_sessions.size() < MAX_UNUSED_I2P_SESSIONS_SIZE) {
                        m_unused_i2p_sessions.emplace(i2p_transient_session.release());
                    }
                }
            }

            if (connected) {
                sock = std::move(conn.sock);
                addr_bind = CAddress{conn.me, NODE_NONE};
            }
        } else if (use_proxy) {
            sock = CreateSock(proxy.proxy);
            if (!sock) {
                return nullptr;
            }
            connected = ConnectThroughProxy(proxy, addrConnect.ToStringAddr(), addrConnect.GetPort(),
                                            *sock, nConnectTimeout, proxyConnectionFailed);
        } else {
            // no proxy needed (none set for target network)
            sock = CreateSock(addrConnect);
            if (!sock) {
                return nullptr;
            }
            connected = ConnectSocketDirectly(addrConnect, *sock, nConnectTimeout,
                                              conn_type == ConnectionType::MANUAL);
        }
        if (!proxyConnectionFailed) {
            // If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to
            // the proxy, mark this as an attempt.
            addrman.Attempt(addrConnect, fCountFailure);
        }
    } else if (pszDest && GetNameProxy(proxy)) {
        sock = CreateSock(proxy.proxy);
        if (!sock) {
            return nullptr;
        }
        std::string host;
        uint16_t port{default_port};
        SplitHostPort(std::string(pszDest), port, host);
        bool proxyConnectionFailed;
        connected = ConnectThroughProxy(proxy, host, port, *sock, nConnectTimeout,
                                        proxyConnectionFailed);
    }
    if (!connected) {
        return nullptr;
    }

    // Add node
    NodeId id = GetNewNodeId();
    uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
    if (!addr_bind.IsValid()) {
        addr_bind = GetBindAddress(*sock);
    }
    CNode* pnode = new CNode(id,
                             std::move(sock),
                             addrConnect,
                             CalculateKeyedNetGroup(addrConnect),
                             nonce,
                             addr_bind,
                             pszDest ? pszDest : "",
                             conn_type,
                             /*inbound_onion=*/false,
                             CNodeOptions{
                                 .i2p_sam_session = std::move(i2p_transient_session),
                                 .recv_flood_size = nReceiveFloodSize,
                                 .use_v2transport = use_v2transport,
                             });
    pnode->AddRef();

    // We're making a new connection, harvest entropy from the time (and our peer count)
    RandAddEvent((uint32_t)id);

    return pnode;
}
void CNode : : CloseSocketDisconnect ( )
{
fDisconnect = true ;
2021-04-23 15:30:46 +02:00
LOCK ( m_sock_mutex ) ;
2021-04-23 15:15:23 +02:00
if ( m_sock ) {
2016-12-25 20:19:40 +00:00
LogPrint ( BCLog : : NET , " disconnecting peer=%d \n " , id ) ;
2021-04-23 15:15:23 +02:00
m_sock . reset ( ) ;
2010-08-29 16:58:15 +00:00
}
2022-06-08 17:26:24 +02:00
m_i2p_sam_session . reset ( ) ;
2010-08-29 16:58:15 +00:00
}
2019-06-20 18:37:51 +09:00
void CConnman : : AddWhitelistPermissionFlags ( NetPermissionFlags & flags , const CNetAddr & addr ) const {
for ( const auto & subnet : vWhitelistedRange ) {
if ( subnet . m_subnet . Match ( addr ) ) NetPermissions : : AddFlag ( flags , subnet . m_flags ) ;
2014-06-21 13:34:36 +02:00
}
}
2021-08-24 19:19:58 +02:00
CService CNode::GetAddrLocal() const
{
    AssertLockNotHeld(m_addr_local_mutex);
    LOCK(m_addr_local_mutex);
    return addrLocal;
}
void CNode : : SetAddrLocal ( const CService & addrLocalIn ) {
2022-01-20 17:28:37 -03:00
AssertLockNotHeld ( m_addr_local_mutex ) ;
2022-01-20 05:41:33 -03:00
LOCK ( m_addr_local_mutex ) ;
2017-02-06 12:18:51 -05:00
if ( addrLocal . IsValid ( ) ) {
2022-07-15 14:13:39 +02:00
error ( " Addr local already set for node: %i. Refusing to change from %s to %s " , id , addrLocal . ToStringAddrPort ( ) , addrLocalIn . ToStringAddrPort ( ) ) ;
2017-02-06 12:18:51 -05:00
} else {
addrLocal = addrLocalIn ;
}
}
2020-09-30 19:19:19 +03:00
Network CNode::ConnectedThroughNetwork() const
{
    // Inbound onion peers always count as NET_ONION regardless of addr.
    return m_inbound_onion ? NET_ONION : addr.GetNetClass();
}
2023-07-19 11:11:06 -06:00
bool CNode : : IsConnectedThroughPrivacyNet ( ) const
{
return m_inbound_onion | | addr . IsPrivacyNet ( ) ;
}
2012-06-29 17:24:53 -04:00
# undef X
# define X(name) stats.name = name
2021-09-01 11:24:46 +01:00
void CNode : : CopyStats ( CNodeStats & stats )
2012-06-29 17:24:53 -04:00
{
2013-11-18 01:25:17 +01:00
stats . nodeid = this - > GetId ( ) ;
2016-10-04 19:27:11 -04:00
X ( addr ) ;
2017-05-30 11:59:42 +02:00
X ( addrBind ) ;
2020-12-25 14:25:45 +01:00
stats . m_network = ConnectedThroughNetwork ( ) ;
2021-10-22 11:06:23 +02:00
X ( m_last_send ) ;
X ( m_last_recv ) ;
2021-12-13 12:32:28 +01:00
X ( m_last_tx_time ) ;
X ( m_last_block_time ) ;
X ( m_connected ) ;
2014-12-15 11:06:15 +01:00
X ( nTimeOffset ) ;
2021-08-26 10:39:10 +02:00
X ( m_addr_name ) ;
2012-06-29 17:24:53 -04:00
X ( nVersion ) ;
2017-02-06 12:08:31 -05:00
{
2022-01-16 02:11:04 +01:00
LOCK ( m_subver_mutex ) ;
2017-02-06 12:08:31 -05:00
X ( cleanSubVer ) ;
}
2020-07-28 13:39:38 -07:00
stats . fInbound = IsInboundConn ( ) ;
2020-08-21 15:17:42 +02:00
X ( m_bip152_highbandwidth_to ) ;
X ( m_bip152_highbandwidth_from ) ;
2017-02-06 02:34:57 -05:00
{
LOCK ( cs_vSend ) ;
2022-04-07 17:13:52 +05:30
X ( mapSendBytesPerMsgType ) ;
2017-02-06 02:34:57 -05:00
X ( nSendBytes ) ;
}
{
LOCK ( cs_vRecv ) ;
2022-04-07 17:13:52 +05:30
X ( mapRecvBytesPerMsgType ) ;
2017-02-06 02:34:57 -05:00
X ( nRecvBytes ) ;
}
2022-09-01 18:50:26 +10:00
X ( m_permission_flags ) ;
2013-11-15 12:24:34 +01:00
2020-09-29 19:11:53 -07:00
X ( m_last_ping_time ) ;
X ( m_min_ping_time ) ;
2013-11-15 12:24:34 +01:00
2013-08-21 22:50:19 -07:00
// Leave string empty if addrLocal invalid (not filled in yet)
2017-02-06 12:18:51 -05:00
CService addrLocalUnlocked = GetAddrLocal ( ) ;
2022-07-15 14:13:39 +02:00
stats . addrLocal = addrLocalUnlocked . IsValid ( ) ? addrLocalUnlocked . ToStringAddrPort ( ) : " " ;
2020-08-12 13:57:13 -07:00
2021-01-02 10:44:03 +01:00
X ( m_conn_type ) ;
2012-06-29 17:24:53 -04:00
}
# undef X
2010-08-29 16:58:15 +00:00
2020-11-20 10:16:10 +01:00
// Feed freshly received wire bytes into this node's transport. Sets `complete`
// to true if at least one full message was extracted into vRecvMsg. Returns
// false on a serious transport problem (the caller should disconnect the peer).
bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete)
{
    complete = false;
    const auto time = GetTime<std::chrono::microseconds>();
    LOCK(cs_vRecv);
    m_last_recv = std::chrono::duration_cast<std::chrono::seconds>(time);
    nRecvBytes += msg_bytes.size();
    while (msg_bytes.size() > 0) {
        // absorb network data (ReceivedBytes advances msg_bytes past what it consumed)
        if (!m_transport->ReceivedBytes(msg_bytes)) {
            // Serious transport problem, disconnect from the peer.
            return false;
        }

        if (m_transport->ReceivedMessageComplete()) {
            // decompose a transport agnostic CNetMessage from the deserializer
            bool reject_message{false};
            CNetMessage msg = m_transport->GetReceivedMessage(time, reject_message);
            if (reject_message) {
                // Message deserialization failed. Drop the message but don't disconnect the peer.
                // store the size of the corrupt message
                mapRecvBytesPerMsgType.at(NET_MESSAGE_TYPE_OTHER) += msg.m_raw_message_size;
                continue;
            }

            // Store received bytes per message type.
            // To prevent a memory DOS, only allow known message types.
            auto i = mapRecvBytesPerMsgType.find(msg.m_type);
            if (i == mapRecvBytesPerMsgType.end()) {
                i = mapRecvBytesPerMsgType.find(NET_MESSAGE_TYPE_OTHER);
            }
            assert(i != mapRecvBytesPerMsgType.end());
            i->second += msg.m_raw_message_size;

            // push the message to the process queue,
            vRecvMsg.push_back(std::move(msg));

            complete = true;
        }
    }
    return true;
}
2023-07-27 15:35:41 -04:00
V1Transport::V1Transport(const NodeId node_id, int nTypeIn, int nVersionIn) noexcept :
    m_node_id(node_id), hdrbuf(nTypeIn, nVersionIn), vRecv(nTypeIn, nVersionIn)
{
    // The chain's message start (network magic) must fit exactly in m_magic_bytes.
    assert(std::size(Params().MessageStart()) == std::size(m_magic_bytes));
    m_magic_bytes = Params().MessageStart();
    // Put the receive side into a clean initial state.
    LOCK(m_recv_mutex);
    Reset();
}
2023-07-05 16:22:52 -04:00
int V1Transport : : readHeader ( Span < const uint8_t > msg_bytes )
2012-11-15 19:41:12 -05:00
{
2023-07-26 13:19:31 -04:00
AssertLockHeld ( m_recv_mutex ) ;
2012-11-15 19:41:12 -05:00
// copy data to temporary parsing buffer
2020-05-10 15:47:32 +02:00
unsigned int nRemaining = CMessageHeader : : HEADER_SIZE - nHdrPos ;
2020-09-30 17:08:26 +02:00
unsigned int nCopy = std : : min < unsigned int > ( nRemaining , msg_bytes . size ( ) ) ;
2012-11-15 19:41:12 -05:00
2020-09-30 17:08:26 +02:00
memcpy ( & hdrbuf [ nHdrPos ] , msg_bytes . data ( ) , nCopy ) ;
2012-11-15 19:41:12 -05:00
nHdrPos + = nCopy ;
// if header incomplete, exit
2020-05-10 15:47:32 +02:00
if ( nHdrPos < CMessageHeader : : HEADER_SIZE )
2012-11-15 19:41:12 -05:00
return nCopy ;
// deserialize to CMessageHeader
try {
hdrbuf > > hdr ;
}
2014-12-07 13:29:06 +01:00
catch ( const std : : exception & ) {
2021-05-07 07:45:55 +02:00
LogPrint ( BCLog : : NET , " Header error: Unable to deserialize, peer=%d \n " , m_node_id ) ;
2020-05-26 17:01:57 -04:00
return - 1 ;
}
// Check start string, network magic
2023-09-12 13:01:07 +02:00
if ( hdr . pchMessageStart ! = m_magic_bytes ) {
2021-05-07 07:45:55 +02:00
LogPrint ( BCLog : : NET , " Header error: Wrong MessageStart %s received, peer=%d \n " , HexStr ( hdr . pchMessageStart ) , m_node_id ) ;
2012-11-15 19:41:12 -05:00
return - 1 ;
}
2019-10-18 11:57:10 -07:00
// reject messages larger than MAX_SIZE or MAX_PROTOCOL_MESSAGE_LENGTH
if ( hdr . nMessageSize > MAX_SIZE | | hdr . nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH ) {
2021-05-07 07:45:55 +02:00
LogPrint ( BCLog : : NET , " Header error: Size too large (%s, %u bytes), peer=%d \n " , SanitizeString ( hdr . GetCommand ( ) ) , hdr . nMessageSize , m_node_id ) ;
2017-03-06 17:54:08 +01:00
return - 1 ;
2019-10-18 11:57:10 -07:00
}
2012-11-15 19:41:12 -05:00
// switch state to reading message data
in_data = true ;
return nCopy ;
}
2023-07-05 16:22:52 -04:00
int V1Transport : : readData ( Span < const uint8_t > msg_bytes )
2012-11-15 19:41:12 -05:00
{
2023-07-26 13:19:31 -04:00
AssertLockHeld ( m_recv_mutex ) ;
2012-11-15 19:41:12 -05:00
unsigned int nRemaining = hdr . nMessageSize - nDataPos ;
2020-09-30 17:08:26 +02:00
unsigned int nCopy = std : : min < unsigned int > ( nRemaining , msg_bytes . size ( ) ) ;
2012-11-15 19:41:12 -05:00
2014-06-21 17:00:38 +02:00
if ( vRecv . size ( ) < nDataPos + nCopy ) {
// Allocate up to 256 KiB ahead, but never more than the total message size.
vRecv . resize ( std : : min ( hdr . nMessageSize , nDataPos + nCopy + 256 * 1024 ) ) ;
}
2020-11-20 10:16:10 +01:00
hasher . Write ( msg_bytes . first ( nCopy ) ) ;
2020-09-30 17:08:26 +02:00
memcpy ( & vRecv [ nDataPos ] , msg_bytes . data ( ) , nCopy ) ;
2012-11-15 19:41:12 -05:00
nDataPos + = nCopy ;
return nCopy ;
}
2023-07-05 16:22:52 -04:00
// Return the (cached) double-SHA256 of the completed message payload.
// Finalizes the incremental hasher lazily on first call after completion.
const uint256& V1Transport::GetMessageHash() const
{
    AssertLockHeld(m_recv_mutex);
    assert(CompleteInternal());
    if (data_hash.IsNull())
        hasher.Finalize(data_hash);
    return data_hash;
}
2023-08-14 16:37:05 -04:00
// Extract the completed message as a CNetMessage. Sets reject_message when the
// checksum or message-type validation fails (the caller drops the message but
// keeps the connection). Always resets the deserializer for the next message.
CNetMessage V1Transport::GetReceivedMessage(const std::chrono::microseconds time, bool& reject_message)
{
    AssertLockNotHeld(m_recv_mutex);
    // Initialize out parameter
    reject_message = false;
    // decompose a single CNetMessage from the TransportDeserializer
    LOCK(m_recv_mutex);
    CNetMessage msg(std::move(vRecv));

    // store message type string, time, and sizes
    msg.m_type = hdr.GetCommand();
    msg.m_time = time;
    msg.m_message_size = hdr.nMessageSize;
    msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE;

    uint256 hash = GetMessageHash();

    // We just received a message off the wire, harvest entropy from the time (and the message checksum)
    RandAddEvent(ReadLE32(hash.begin()));

    // Check checksum and header message type string
    if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) {
        LogPrint(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n",
                 SanitizeString(msg.m_type), msg.m_message_size,
                 HexStr(Span{hash}.first(CMessageHeader::CHECKSUM_SIZE)),
                 HexStr(hdr.pchChecksum),
                 m_node_id);
        reject_message = true;
    } else if (!hdr.IsCommandValid()) {
        LogPrint(BCLog::NET, "Header error: Invalid message type (%s, %u bytes), peer=%d\n",
                 SanitizeString(hdr.GetCommand()), msg.m_message_size, m_node_id);
        reject_message = true;
    }

    // Always reset the network deserializer (prepare for the next message)
    Reset();
    return msg;
}
2023-07-21 16:31:59 -04:00
// Queue `msg` as the next message to send. Returns false (leaving msg intact)
// if the previously set message has not been fully sent yet.
bool V1Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept
{
    AssertLockNotHeld(m_send_mutex);
    // Determine whether a new message can be set.
    LOCK(m_send_mutex);
    if (m_sending_header || m_bytes_sent < m_message_to_send.data.size()) return false;

    // create dbl-sha256 checksum
    uint256 hash = Hash(msg.data);

    // create header
    CMessageHeader hdr(m_magic_bytes, msg.m_type.c_str(), msg.data.size());
    memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);

    // serialize header into m_header_to_send (CVectorWriter appends on construction)
    m_header_to_send.clear();
    CVectorWriter{INIT_PROTO_VERSION, m_header_to_send, 0, hdr};

    // update state: start by sending the header, then the payload
    m_message_to_send = std::move(msg);
    m_sending_header = true;
    m_bytes_sent = 0;
    return true;
}
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
Transport : : BytesToSend V1Transport : : GetBytesToSend ( bool have_next_message ) const noexcept
2023-07-21 16:31:59 -04:00
{
AssertLockNotHeld ( m_send_mutex ) ;
LOCK ( m_send_mutex ) ;
if ( m_sending_header ) {
return { Span { m_header_to_send } . subspan ( m_bytes_sent ) ,
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
// We have more to send after the header if the message has payload, or if there
// is a next message after that.
have_next_message | | ! m_message_to_send . data . empty ( ) ,
2023-07-21 16:31:59 -04:00
m_message_to_send . m_type
} ;
} else {
return { Span { m_message_to_send . data } . subspan ( m_bytes_sent ) ,
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
// We only have more to send after this message's payload if there is another
// message.
have_next_message ,
2023-07-21 16:31:59 -04:00
m_message_to_send . m_type
} ;
}
}
// Record that `bytes_sent` bytes of the current header/payload were sent, and
// advance the header -> payload -> done state machine accordingly.
void V1Transport::MarkBytesSent(size_t bytes_sent) noexcept
{
    AssertLockNotHeld(m_send_mutex);
    LOCK(m_send_mutex);
    m_bytes_sent += bytes_sent;
    if (m_sending_header && m_bytes_sent == m_header_to_send.size()) {
        // We're done sending a message's header. Switch to sending its data bytes.
        m_sending_header = false;
        m_bytes_sent = 0;
    } else if (!m_sending_header && m_bytes_sent == m_message_to_send.data.size()) {
        // We're done sending a message's data. Wipe the data vector to reduce memory consumption.
        ClearShrink(m_message_to_send.data);
        m_bytes_sent = 0;
    }
}
2023-07-24 13:23:39 -04:00
size_t V1Transport : : GetSendMemoryUsage ( ) const noexcept
{
AssertLockNotHeld ( m_send_mutex ) ;
LOCK ( m_send_mutex ) ;
// Don't count sending-side fields besides m_message_to_send, as they're all small and bounded.
return m_message_to_send . GetMemoryUsage ( ) ;
}
2023-07-30 21:25:10 -04:00
namespace {
/** List of short messages as defined in BIP324, in order.
 *
 * Only message types that are actually implemented in this codebase need to be listed, as other
 * messages get ignored anyway - whether we know how to decode them or not.
 */
const std::array<std::string, 33> V2_MESSAGE_IDS = {
    "", // 12 bytes follow encoding the message type like in V1
    NetMsgType::ADDR,
    NetMsgType::BLOCK,
    NetMsgType::BLOCKTXN,
    NetMsgType::CMPCTBLOCK,
    NetMsgType::FEEFILTER,
    NetMsgType::FILTERADD,
    NetMsgType::FILTERCLEAR,
    NetMsgType::FILTERLOAD,
    NetMsgType::GETBLOCKS,
    NetMsgType::GETBLOCKTXN,
    NetMsgType::GETDATA,
    NetMsgType::GETHEADERS,
    NetMsgType::HEADERS,
    NetMsgType::INV,
    NetMsgType::MEMPOOL,
    NetMsgType::MERKLEBLOCK,
    NetMsgType::NOTFOUND,
    NetMsgType::PING,
    NetMsgType::PONG,
    NetMsgType::SENDCMPCT,
    NetMsgType::TX,
    NetMsgType::GETCFILTERS,
    NetMsgType::CFILTER,
    NetMsgType::GETCFHEADERS,
    NetMsgType::CFHEADERS,
    NetMsgType::GETCFCHECKPT,
    NetMsgType::CFCHECKPT,
    NetMsgType::ADDRV2,
    // Unimplemented message types that are assigned in BIP324:
    "",
    "",
    "",
    ""
};

/** Reverse mapping: message type name -> BIP324 short message id (index into V2_MESSAGE_IDS). */
class V2MessageMap
{
    std::unordered_map<std::string, uint8_t> m_map;

public:
    V2MessageMap() noexcept
    {
        // Index 0 is skipped: it denotes the long (12-byte) encoding, not a message type.
        for (size_t i = 1; i < std::size(V2_MESSAGE_IDS); ++i) {
            m_map.emplace(V2_MESSAGE_IDS[i], i);
        }
    }

    // Return the short id for message_name, or nullopt if it has none.
    std::optional<uint8_t> operator()(const std::string& message_name) const noexcept
    {
        auto it = m_map.find(message_name);
        if (it == m_map.end()) return std::nullopt;
        return it->second;
    }
};

const V2MessageMap V2_MESSAGE_MAP;

// Generate a fresh random compressed private key (used for the ephemeral BIP324 key).
CKey GenerateRandomKey() noexcept
{
    CKey key;
    key.MakeNewKey(/*fCompressed=*/true);
    return key;
}

// Generate a uniformly random garbage string of length 0..MAX_GARBAGE_LEN for the
// BIP324 handshake.
std::vector<uint8_t> GenerateRandomGarbage() noexcept
{
    std::vector<uint8_t> ret;
    FastRandomContext rng;
    ret.resize(rng.randrange(V2Transport::MAX_GARBAGE_LEN + 1));
    rng.fillrand(MakeWritableByteSpan(ret));
    return ret;
}
} // namespace
2023-09-08 13:55:47 -04:00
// Begin the v2 handshake on the sending side: queue our ellswift public key
// followed by the garbage bytes. Caller must hold m_send_mutex and be in
// AWAITING_KEY state with an empty send buffer.
void V2Transport::StartSendingHandshake() noexcept
{
    AssertLockHeld(m_send_mutex);
    Assume(m_send_state == SendState::AWAITING_KEY);
    Assume(m_send_buffer.empty());
    // Initialize the send buffer with ellswift pubkey + provided garbage.
    m_send_buffer.resize(EllSwiftPubKey::size() + m_send_garbage.size());
    std::copy(std::begin(m_cipher.GetOurPubKey()), std::end(m_cipher.GetOurPubKey()), MakeWritableByteSpan(m_send_buffer).begin());
    std::copy(m_send_garbage.begin(), m_send_garbage.end(), m_send_buffer.begin() + EllSwiftPubKey::size());
    // We cannot wipe m_send_garbage as it will still be used as AAD later in the handshake.
}
// Construct a v2 transport with explicit key material/garbage (primarily for tests;
// the delegating constructor below supplies random values). The initiator starts in
// KEY/AWAITING_KEY; the responder starts in KEY_MAYBE_V1/MAYBE_V1 so it can still
// detect an incoming v1 connection.
V2Transport::V2Transport(NodeId nodeid, bool initiating, int type_in, int version_in, const CKey& key, Span<const std::byte> ent32, std::vector<uint8_t> garbage) noexcept :
    m_cipher{key, ent32}, m_initiating{initiating}, m_nodeid{nodeid},
    m_v1_fallback{nodeid, type_in, version_in}, m_recv_type{type_in}, m_recv_version{version_in},
    m_recv_state{initiating ? RecvState::KEY : RecvState::KEY_MAYBE_V1},
    m_send_garbage{std::move(garbage)},
    m_send_state{initiating ? SendState::AWAITING_KEY : SendState::MAYBE_V1}
{
    Assume(m_send_garbage.size() <= MAX_GARBAGE_LEN);
    // Start sending immediately if we're the initiator of the connection.
    if (initiating) {
        LOCK(m_send_mutex);
        StartSendingHandshake();
    }
}
2023-09-08 11:48:09 -04:00
// Production constructor: delegate with a freshly generated key, 32 bytes of
// entropy, and random garbage.
V2Transport::V2Transport(NodeId nodeid, bool initiating, int type_in, int version_in) noexcept :
    V2Transport{nodeid, initiating, type_in, version_in, GenerateRandomKey(),
                MakeByteSpan(GetRandHash()), GenerateRandomGarbage()} {}
2023-07-27 15:10:34 -04:00
// Transition the receive-side state machine, asserting (via Assume) that the
// transition is one of the permitted ones.
void V2Transport::SetReceiveState(RecvState recv_state) noexcept
{
    AssertLockHeld(m_recv_mutex);
    // Enforce allowed state transitions.
    switch (m_recv_state) {
    case RecvState::KEY_MAYBE_V1:
        Assume(recv_state == RecvState::KEY || recv_state == RecvState::V1);
        break;
    case RecvState::KEY:
        Assume(recv_state == RecvState::GARB_GARBTERM);
        break;
    case RecvState::GARB_GARBTERM:
        Assume(recv_state == RecvState::VERSION);
        break;
    case RecvState::VERSION:
        Assume(recv_state == RecvState::APP);
        break;
    case RecvState::APP:
        Assume(recv_state == RecvState::APP_READY);
        break;
    case RecvState::APP_READY:
        Assume(recv_state == RecvState::APP);
        break;
    case RecvState::V1:
        Assume(false); // V1 state cannot be left
        break;
    }
    // Change state.
    m_recv_state = recv_state;
}
// Transition the send-side state machine, asserting (via Assume) that the
// transition is one of the permitted ones.
void V2Transport::SetSendState(SendState send_state) noexcept
{
    AssertLockHeld(m_send_mutex);
    // Enforce allowed state transitions.
    switch (m_send_state) {
    case SendState::MAYBE_V1:
        Assume(send_state == SendState::V1 || send_state == SendState::AWAITING_KEY);
        break;
    case SendState::AWAITING_KEY:
        Assume(send_state == SendState::READY);
        break;
    case SendState::READY:
    case SendState::V1:
        Assume(false); // Final states
        break;
    }
    // Change state.
    m_send_state = send_state;
}
bool V2Transport : : ReceivedMessageComplete ( ) const noexcept
{
AssertLockNotHeld ( m_recv_mutex ) ;
LOCK ( m_recv_mutex ) ;
2023-08-29 22:37:18 -04:00
if ( m_recv_state = = RecvState : : V1 ) return m_v1_fallback . ReceivedMessageComplete ( ) ;
2023-07-27 15:10:34 -04:00
return m_recv_state = = RecvState : : APP_READY ;
}
2023-08-29 22:37:18 -04:00
// In KEY_MAYBE_V1 state (responder only): decide whether the incoming bytes are
// the start of a v1 connection (network magic + "version" command prefix) or of
// a v2 public key, and transition both state machines accordingly.
void V2Transport::ProcessReceivedMaybeV1Bytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    AssertLockNotHeld(m_send_mutex);
    Assume(m_recv_state == RecvState::KEY_MAYBE_V1);
    // We still have to determine if this is a v1 or v2 connection. The bytes being received could
    // be the beginning of either a v1 packet (network magic + "version\x00"), or of a v2 public
    // key. BIP324 specifies that a mismatch with this 12-byte string should trigger sending of the
    // key.
    std::array<uint8_t, V1_PREFIX_LEN> v1_prefix = {0, 0, 0, 0, 'v', 'e', 'r', 's', 'i', 'o', 'n', 0};
    std::copy(std::begin(Params().MessageStart()), std::end(Params().MessageStart()), v1_prefix.begin());
    Assume(m_recv_buffer.size() <= v1_prefix.size());
    if (!std::equal(m_recv_buffer.begin(), m_recv_buffer.end(), v1_prefix.begin())) {
        // Mismatch with v1 prefix, so we can assume a v2 connection.
        SetReceiveState(RecvState::KEY); // Convert to KEY state, leaving received bytes around.
        // Transition the sender to AWAITING_KEY state and start sending.
        LOCK(m_send_mutex);
        SetSendState(SendState::AWAITING_KEY);
        StartSendingHandshake();
    } else if (m_recv_buffer.size() == v1_prefix.size()) {
        // Full match with the v1 prefix, so fall back to v1 behavior.
        LOCK(m_send_mutex);
        Span<const uint8_t> feedback{m_recv_buffer};
        // Feed already received bytes to v1 transport. It should always accept these, because it's
        // less than the size of a v1 header, and these are the first bytes fed to m_v1_fallback.
        bool ret = m_v1_fallback.ReceivedBytes(feedback);
        Assume(feedback.empty());
        Assume(ret);
        SetReceiveState(RecvState::V1);
        SetSendState(SendState::V1);
        // Reset v2 transport buffers to save memory.
        ClearShrink(m_recv_buffer);
        ClearShrink(m_send_buffer);
    } else {
        // We have not received enough to distinguish v1 from v2 yet. Wait until more bytes come.
    }
}
2023-09-05 23:38:15 -04:00
// In KEY state: accumulate the peer's 64-byte ellswift public key. Once fully
// received, initialize the ciphers and queue our garbage terminator + encrypted
// version packet. Returns false to request a disconnect (wrong-network v1 peer).
bool V2Transport::ProcessReceivedKeyBytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    AssertLockNotHeld(m_send_mutex);
    Assume(m_recv_state == RecvState::KEY);
    Assume(m_recv_buffer.size() <= EllSwiftPubKey::size());

    // As a special exception, if bytes 4-16 of the key on a responder connection match the
    // corresponding bytes of a V1 version message, but bytes 0-4 don't match the network magic
    // (if they did, we'd have switched to V1 state already), assume this is a peer from
    // another network, and disconnect them. They will almost certainly disconnect us too when
    // they receive our uniformly random key and garbage, but detecting this case specially
    // means we can log it.
    static constexpr std::array<uint8_t, 12> MATCH = {'v', 'e', 'r', 's', 'i', 'o', 'n', 0, 0, 0, 0, 0};
    static constexpr size_t OFFSET = std::tuple_size_v<MessageStartChars>;
    if (!m_initiating && m_recv_buffer.size() >= OFFSET + MATCH.size()) {
        if (std::equal(MATCH.begin(), MATCH.end(), m_recv_buffer.begin() + OFFSET)) {
            LogPrint(BCLog::NET, "V2 transport error: V1 peer with wrong MessageStart %s\n",
                     HexStr(Span(m_recv_buffer).first(OFFSET)));
            return false;
        }
    }

    if (m_recv_buffer.size() == EllSwiftPubKey::size()) {
        // Other side's key has been fully received, and can now be Diffie-Hellman combined with
        // our key to initialize the encryption ciphers.

        // Initialize the ciphers.
        EllSwiftPubKey ellswift(MakeByteSpan(m_recv_buffer));
        LOCK(m_send_mutex);
        m_cipher.Initialize(ellswift, m_initiating);

        // Switch receiver state to GARB_GARBTERM.
        SetReceiveState(RecvState::GARB_GARBTERM);
        m_recv_buffer.clear();

        // Switch sender state to READY.
        SetSendState(SendState::READY);

        // Append the garbage terminator to the send buffer.
        m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::GARBAGE_TERMINATOR_LEN);
        std::copy(m_cipher.GetSendGarbageTerminator().begin(),
                  m_cipher.GetSendGarbageTerminator().end(),
                  MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN).begin());

        // Construct version packet in the send buffer, with the sent garbage data as AAD.
        m_send_buffer.resize(m_send_buffer.size() + BIP324Cipher::EXPANSION + VERSION_CONTENTS.size());
        m_cipher.Encrypt(
            /*contents=*/VERSION_CONTENTS,
            /*aad=*/MakeByteSpan(m_send_garbage),
            /*ignore=*/false,
            /*output=*/MakeWritableByteSpan(m_send_buffer).last(BIP324Cipher::EXPANSION + VERSION_CONTENTS.size()));
        // We no longer need the garbage.
        ClearShrink(m_send_garbage);
    } else {
        // We still have to receive more key bytes.
    }
    return true;
}
// In GARB_GARBTERM state: scan the received bytes for our garbage terminator.
// On match, stash the preceding garbage as AAD for the version packet and move
// to VERSION state. Returns false (disconnect) if no terminator appears within
// the maximum permitted garbage length.
bool V2Transport::ProcessReceivedGarbageBytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    Assume(m_recv_state == RecvState::GARB_GARBTERM);
    Assume(m_recv_buffer.size() <= MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN);
    if (m_recv_buffer.size() >= BIP324Cipher::GARBAGE_TERMINATOR_LEN) {
        if (MakeByteSpan(m_recv_buffer).last(BIP324Cipher::GARBAGE_TERMINATOR_LEN) == m_cipher.GetReceiveGarbageTerminator()) {
            // Garbage terminator received. Store garbage to authenticate it as AAD later.
            m_recv_aad = std::move(m_recv_buffer);
            m_recv_aad.resize(m_recv_aad.size() - BIP324Cipher::GARBAGE_TERMINATOR_LEN);
            m_recv_buffer.clear();
            SetReceiveState(RecvState::VERSION);
        } else if (m_recv_buffer.size() == MAX_GARBAGE_LEN + BIP324Cipher::GARBAGE_TERMINATOR_LEN) {
            // We've reached the maximum length for garbage + garbage terminator, and the
            // terminator still does not match. Abort.
            LogPrint(BCLog::NET, "V2 transport error: missing garbage terminator, peer=%d\n", m_nodeid);
            return false;
        } else {
            // We still need to receive more garbage and/or garbage terminator bytes.
        }
    } else {
        // We have less than GARBAGE_TERMINATOR_LEN (16) bytes, so we certainly need to receive
        // more first.
    }
    return true;
}
// In VERSION/APP state: process received packet bytes. First decrypt the
// 3-byte length descriptor, then (once the full ciphertext has arrived) decrypt
// and authenticate the packet. Returns false (disconnect) on oversized packets
// or decryption failure.
bool V2Transport::ProcessReceivedPacketBytes() noexcept
{
    AssertLockHeld(m_recv_mutex);
    Assume(m_recv_state == RecvState::VERSION || m_recv_state == RecvState::APP);

    // The maximum permitted contents length for a packet, consisting of:
    // - 0x00 byte: indicating long message type encoding
    // - 12 bytes of message type
    // - payload
    static constexpr size_t MAX_CONTENTS_LEN =
        1 + CMessageHeader::COMMAND_SIZE +
        std::min<size_t>(MAX_SIZE, MAX_PROTOCOL_MESSAGE_LENGTH);

    if (m_recv_buffer.size() == BIP324Cipher::LENGTH_LEN) {
        // Length descriptor received.
        m_recv_len = m_cipher.DecryptLength(MakeByteSpan(m_recv_buffer));
        if (m_recv_len > MAX_CONTENTS_LEN) {
            LogPrint(BCLog::NET, "V2 transport error: packet too large (%u bytes), peer=%d\n", m_recv_len, m_nodeid);
            return false;
        }
    } else if (m_recv_buffer.size() > BIP324Cipher::LENGTH_LEN && m_recv_buffer.size() == m_recv_len + BIP324Cipher::EXPANSION) {
        // Ciphertext received, decrypt it into m_recv_decode_buffer.
        // Note that it is impossible to reach this branch without hitting the branch above first,
        // as GetMaxBytesToProcess only allows up to LENGTH_LEN into the buffer before that point.
        m_recv_decode_buffer.resize(m_recv_len);
        bool ignore{false};
        bool ret = m_cipher.Decrypt(
            /*input=*/MakeByteSpan(m_recv_buffer).subspan(BIP324Cipher::LENGTH_LEN),
            /*aad=*/MakeByteSpan(m_recv_aad),
            /*ignore=*/ignore,
            /*contents=*/MakeWritableByteSpan(m_recv_decode_buffer));
        if (!ret) {
            LogPrint(BCLog::NET, "V2 transport error: packet decryption failure (%u bytes), peer=%d\n", m_recv_len, m_nodeid);
            return false;
        }
        // We have decrypted a valid packet with the AAD we expected, so clear the expected AAD.
        ClearShrink(m_recv_aad);
        // Feed the last 4 bytes of the Poly1305 authentication tag (and its timing) into our RNG.
        RandAddEvent(ReadLE32(m_recv_buffer.data() + m_recv_buffer.size() - 4));

        // At this point we have a valid packet decrypted into m_recv_decode_buffer. If it's not a
        // decoy, which we simply ignore, use the current state to decide what to do with it.
        if (!ignore) {
            switch (m_recv_state) {
            case RecvState::VERSION:
                // Version message received; transition to application phase. The contents is
                // ignored, but can be used for future extensions.
                SetReceiveState(RecvState::APP);
                break;
            case RecvState::APP:
                // Application message decrypted correctly. It can be extracted using GetMessage().
                SetReceiveState(RecvState::APP_READY);
                break;
            default:
                // Any other state is invalid (this function should not have been called).
                Assume(false);
            }
        }
        // Wipe the receive buffer where the next packet will be received into.
        ClearShrink(m_recv_buffer);
        // In all but APP_READY state, we can wipe the decoded contents.
        if (m_recv_state != RecvState::APP_READY) ClearShrink(m_recv_decode_buffer);
    } else {
        // We either have less than 3 bytes, so we don't know the packet's length yet, or more
        // than 3 bytes but less than the packet's full ciphertext. Wait until those arrive.
    }
    return true;
}
// Return how many more bytes the current receive state allows into
// m_recv_buffer. Caller must hold m_recv_mutex.
size_t V2Transport::GetMaxBytesToProcess() noexcept
{
    AssertLockHeld(m_recv_mutex);
    switch (m_recv_state) {
    case RecvState::KEY_MAYBE_V1:
        // During the KEY_MAYBE_V1 state we do not allow more than the length of v1 prefix into the
        // receive buffer.
        Assume(m_recv_buffer.size() <= V1_PREFIX_LEN);
        // As long as we're not sure if this is a v1 or v2 connection, don't receive more than what
        // is strictly necessary to distinguish the two (12 bytes). If we permitted more than
        // the v1 header size (24 bytes), we may not be able to feed the already-received bytes
        // back into the m_v1_fallback V1 transport.
        return V1_PREFIX_LEN - m_recv_buffer.size();
    case RecvState::KEY:
        // During the KEY state, we only allow the 64-byte key into the receive buffer.
        Assume(m_recv_buffer.size() <= EllSwiftPubKey::size());
        // As long as we have not received the other side's public key, don't receive more than
        // that (64 bytes), as garbage follows, and locating the garbage terminator requires the
        // key exchange first.
        return EllSwiftPubKey::size() - m_recv_buffer.size();
    case RecvState::GARB_GARBTERM:
        // Process garbage bytes one by one (because terminator may appear anywhere).
        return 1;
    case RecvState::VERSION:
    case RecvState::APP:
        // Both these states involve decoding a packet. Process the length descriptor first,
        // so that we know where the current packet ends (and we don't process bytes from the next
        // packet or decoy yet). Then, process the ciphertext bytes of the current packet.
        if (m_recv_buffer.size() < BIP324Cipher::LENGTH_LEN) {
            return BIP324Cipher::LENGTH_LEN - m_recv_buffer.size();
        } else {
            // Note that BIP324Cipher::EXPANSION is the total difference between contents size
            // and encoded packet size, which includes the 3 bytes due to the packet length.
            // When transitioning from receiving the packet length to receiving its ciphertext,
            // the encrypted packet length is left in the receive buffer.
            return BIP324Cipher::EXPANSION + m_recv_len - m_recv_buffer.size();
        }
    case RecvState::APP_READY:
        // No bytes can be processed until GetMessage() is called.
        return 0;
    case RecvState::V1:
        // Not allowed (must be dealt with by the caller).
        Assume(false);
        return 0;
    }
    Assume(false); // unreachable
    return 0;
}
bool V2Transport : : ReceivedBytes ( Span < const uint8_t > & msg_bytes ) noexcept
{
AssertLockNotHeld ( m_recv_mutex ) ;
2023-07-30 10:51:12 -04:00
/** How many bytes to allocate in the receive buffer at most above what is received so far. */
static constexpr size_t MAX_RESERVE_AHEAD = 256 * 1024 ;
2023-07-27 15:10:34 -04:00
LOCK ( m_recv_mutex ) ;
2023-08-29 22:37:18 -04:00
if ( m_recv_state = = RecvState : : V1 ) return m_v1_fallback . ReceivedBytes ( msg_bytes ) ;
2023-07-27 15:10:34 -04:00
// Process the provided bytes in msg_bytes in a loop. In each iteration a nonzero number of
// bytes (decided by GetMaxBytesToProcess) are taken from the beginning om msg_bytes, and
// appended to m_recv_buffer. Then, depending on the receiver state, one of the
// ProcessReceived*Bytes functions is called to process the bytes in that buffer.
while ( ! msg_bytes . empty ( ) ) {
// Decide how many bytes to copy from msg_bytes to m_recv_buffer.
size_t max_read = GetMaxBytesToProcess ( ) ;
2023-07-30 10:51:12 -04:00
// Reserve space in the buffer if there is not enough.
if ( m_recv_buffer . size ( ) + std : : min ( msg_bytes . size ( ) , max_read ) > m_recv_buffer . capacity ( ) ) {
switch ( m_recv_state ) {
case RecvState : : KEY_MAYBE_V1 :
case RecvState : : KEY :
case RecvState : : GARB_GARBTERM :
// During the initial states (key/garbage), allocate once to fit the maximum (4111
// bytes).
m_recv_buffer . reserve ( MAX_GARBAGE_LEN + BIP324Cipher : : GARBAGE_TERMINATOR_LEN ) ;
break ;
case RecvState : : VERSION :
case RecvState : : APP : {
// During states where a packet is being received, as much as is expected but never
// more than MAX_RESERVE_AHEAD bytes in addition to what is received so far.
// This means attackers that want to cause us to waste allocated memory are limited
// to MAX_RESERVE_AHEAD above the largest allowed message contents size, and to
// MAX_RESERVE_AHEAD more than they've actually sent us.
size_t alloc_add = std : : min ( max_read , msg_bytes . size ( ) + MAX_RESERVE_AHEAD ) ;
m_recv_buffer . reserve ( m_recv_buffer . size ( ) + alloc_add ) ;
break ;
}
case RecvState : : APP_READY :
// The buffer is empty in this state.
Assume ( m_recv_buffer . empty ( ) ) ;
break ;
case RecvState : : V1 :
// Should have bailed out above.
Assume ( false ) ;
break ;
}
}
2023-07-27 15:10:34 -04:00
// Can't read more than provided input.
max_read = std : : min ( msg_bytes . size ( ) , max_read ) ;
// Copy data to buffer.
m_recv_buffer . insert ( m_recv_buffer . end ( ) , UCharCast ( msg_bytes . data ( ) ) , UCharCast ( msg_bytes . data ( ) + max_read ) ) ;
msg_bytes = msg_bytes . subspan ( max_read ) ;
// Process data in the buffer.
switch ( m_recv_state ) {
2023-08-29 22:37:18 -04:00
case RecvState : : KEY_MAYBE_V1 :
ProcessReceivedMaybeV1Bytes ( ) ;
if ( m_recv_state = = RecvState : : V1 ) return true ;
break ;
2023-07-27 15:10:34 -04:00
case RecvState : : KEY :
2023-09-05 23:38:15 -04:00
if ( ! ProcessReceivedKeyBytes ( ) ) return false ;
2023-07-27 15:10:34 -04:00
break ;
case RecvState : : GARB_GARBTERM :
if ( ! ProcessReceivedGarbageBytes ( ) ) return false ;
break ;
case RecvState : : VERSION :
case RecvState : : APP :
if ( ! ProcessReceivedPacketBytes ( ) ) return false ;
break ;
case RecvState : : APP_READY :
return true ;
2023-08-29 22:37:18 -04:00
case RecvState : : V1 :
// We should have bailed out before.
Assume ( false ) ;
break ;
2023-07-27 15:10:34 -04:00
}
// Make sure we have made progress before continuing.
Assume ( max_read > 0 ) ;
}
return true ;
}
std : : optional < std : : string > V2Transport : : GetMessageType ( Span < const uint8_t > & contents ) noexcept
{
if ( contents . size ( ) = = 0 ) return std : : nullopt ; // Empty contents
uint8_t first_byte = contents [ 0 ] ;
contents = contents . subspan ( 1 ) ; // Strip first byte.
2023-07-30 21:25:10 -04:00
if ( first_byte ! = 0 ) {
// Short (1 byte) encoding.
if ( first_byte < std : : size ( V2_MESSAGE_IDS ) ) {
// Valid short message id.
return V2_MESSAGE_IDS [ first_byte ] ;
} else {
// Unknown short message id.
return std : : nullopt ;
}
}
2023-07-27 15:10:34 -04:00
if ( contents . size ( ) < CMessageHeader : : COMMAND_SIZE ) {
return std : : nullopt ; // Long encoding needs 12 message type bytes.
}
size_t msg_type_len { 0 } ;
while ( msg_type_len < CMessageHeader : : COMMAND_SIZE & & contents [ msg_type_len ] ! = 0 ) {
// Verify that message type bytes before the first 0x00 are in range.
if ( contents [ msg_type_len ] < ' ' | | contents [ msg_type_len ] > 0x7F ) {
return { } ;
}
+ + msg_type_len ;
}
std : : string ret { reinterpret_cast < const char * > ( contents . data ( ) ) , msg_type_len } ;
while ( msg_type_len < CMessageHeader : : COMMAND_SIZE ) {
// Verify that message type bytes after the first 0x00 are also 0x00.
if ( contents [ msg_type_len ] ! = 0 ) return { } ;
+ + msg_type_len ;
}
// Strip message type bytes of contents.
contents = contents . subspan ( CMessageHeader : : COMMAND_SIZE ) ;
return { std : : move ( ret ) } ;
}
CNetMessage V2Transport::GetReceivedMessage(std::chrono::microseconds time, bool& reject_message) noexcept
{
    AssertLockNotHeld(m_recv_mutex);
    LOCK(m_recv_mutex);
    if (m_recv_state == RecvState::V1) return m_v1_fallback.GetReceivedMessage(time, reject_message);

    Assume(m_recv_state == RecvState::APP_READY);
    Span<const uint8_t> contents{m_recv_decode_buffer};
    auto msg_type = GetMessageType(contents);
    CNetMessage msg{CDataStream{m_recv_type, m_recv_version}};
    // BIP324Cipher::EXPANSION also accounts for the length descriptor size.
    msg.m_raw_message_size = m_recv_decode_buffer.size() + BIP324Cipher::EXPANSION;
    if (!msg_type) {
        LogPrint(BCLog::NET, "V2 transport error: invalid message type (%u bytes contents), peer=%d\n", m_recv_decode_buffer.size(), m_nodeid);
        reject_message = true;
    } else {
        reject_message = false;
        msg.m_type = std::move(*msg_type);
        msg.m_time = time;
        msg.m_message_size = contents.size();
        msg.m_recv.resize(contents.size());
        std::copy(contents.begin(), contents.end(), UCharCast(msg.m_recv.data()));
    }
    // The decoded contents have been handed over (or rejected); free their memory.
    ClearShrink(m_recv_decode_buffer);
    SetReceiveState(RecvState::APP);
    return msg;
}
bool V2Transport : : SetMessageToSend ( CSerializedNetMsg & msg ) noexcept
{
AssertLockNotHeld ( m_send_mutex ) ;
LOCK ( m_send_mutex ) ;
2023-08-29 22:37:18 -04:00
if ( m_send_state = = SendState : : V1 ) return m_v1_fallback . SetMessageToSend ( msg ) ;
2023-07-27 15:10:34 -04:00
// We only allow adding a new message to be sent when in the READY state (so the packet cipher
// is available) and the send buffer is empty. This limits the number of messages in the send
// buffer to just one, and leaves the responsibility for queueing them up to the caller.
if ( ! ( m_send_state = = SendState : : READY & & m_send_buffer . empty ( ) ) ) return false ;
// Construct contents (encoding message type + payload).
2023-07-30 21:25:10 -04:00
std : : vector < uint8_t > contents ;
auto short_message_id = V2_MESSAGE_MAP ( msg . m_type ) ;
if ( short_message_id ) {
contents . resize ( 1 + msg . data . size ( ) ) ;
contents [ 0 ] = * short_message_id ;
std : : copy ( msg . data . begin ( ) , msg . data . end ( ) , contents . begin ( ) + 1 ) ;
} else {
// Initialize with zeroes, and then write the message type string starting at offset 1.
// This means contents[0] and the unused positions in contents[1..13] remain 0x00.
contents . resize ( 1 + CMessageHeader : : COMMAND_SIZE + msg . data . size ( ) , 0 ) ;
std : : copy ( msg . m_type . begin ( ) , msg . m_type . end ( ) , contents . data ( ) + 1 ) ;
std : : copy ( msg . data . begin ( ) , msg . data . end ( ) , contents . begin ( ) + 1 + CMessageHeader : : COMMAND_SIZE ) ;
}
2023-07-27 15:10:34 -04:00
// Construct ciphertext in send buffer.
m_send_buffer . resize ( contents . size ( ) + BIP324Cipher : : EXPANSION ) ;
m_cipher . Encrypt ( MakeByteSpan ( contents ) , { } , false , MakeWritableByteSpan ( m_send_buffer ) ) ;
m_send_type = msg . m_type ;
// Release memory
2023-09-11 13:54:32 -04:00
ClearShrink ( msg . data ) ;
2023-07-27 15:10:34 -04:00
return true ;
}
Transport::BytesToSend V2Transport::GetBytesToSend(bool have_next_message) const noexcept
{
    AssertLockNotHeld(m_send_mutex);
    LOCK(m_send_mutex);
    if (m_send_state == SendState::V1) return m_v1_fallback.GetBytesToSend(have_next_message);

    if (m_send_state == SendState::MAYBE_V1) Assume(m_send_buffer.empty());
    Assume(m_send_pos <= m_send_buffer.size());
    return {
        Span{m_send_buffer}.subspan(m_send_pos),
        // More will follow the current m_send_buffer only if a (next) message is
        // available and the transport is in a state where packets can be sent.
        have_next_message && m_send_state == SendState::READY,
        m_send_type
    };
}
void V2Transport : : MarkBytesSent ( size_t bytes_sent ) noexcept
{
AssertLockNotHeld ( m_send_mutex ) ;
LOCK ( m_send_mutex ) ;
2023-08-29 22:37:18 -04:00
if ( m_send_state = = SendState : : V1 ) return m_v1_fallback . MarkBytesSent ( bytes_sent ) ;
2023-07-27 15:10:34 -04:00
m_send_pos + = bytes_sent ;
Assume ( m_send_pos < = m_send_buffer . size ( ) ) ;
2023-08-22 20:42:24 -04:00
if ( m_send_pos > = CMessageHeader : : HEADER_SIZE ) {
m_sent_v1_header_worth = true ;
}
2023-09-08 13:55:47 -04:00
// Wipe the buffer when everything is sent.
if ( m_send_pos = = m_send_buffer . size ( ) ) {
2023-07-27 15:10:34 -04:00
m_send_pos = 0 ;
2023-09-11 13:54:32 -04:00
ClearShrink ( m_send_buffer ) ;
2023-07-27 15:10:34 -04:00
}
}
2023-08-22 20:42:24 -04:00
bool V2Transport : : ShouldReconnectV1 ( ) const noexcept
{
AssertLockNotHeld ( m_send_mutex ) ;
AssertLockNotHeld ( m_recv_mutex ) ;
// Only outgoing connections need reconnection.
if ( ! m_initiating ) return false ;
LOCK ( m_recv_mutex ) ;
// We only reconnect in the very first state and when the receive buffer is empty. Together
// these conditions imply nothing has been received so far.
if ( m_recv_state ! = RecvState : : KEY ) return false ;
if ( ! m_recv_buffer . empty ( ) ) return false ;
// Check if we've sent enough for the other side to disconnect us (if it was V1).
LOCK ( m_send_mutex ) ;
return m_sent_v1_header_worth ;
}
2023-07-27 15:10:34 -04:00
size_t V2Transport : : GetSendMemoryUsage ( ) const noexcept
{
AssertLockNotHeld ( m_send_mutex ) ;
LOCK ( m_send_mutex ) ;
2023-08-29 22:37:18 -04:00
if ( m_send_state = = SendState : : V1 ) return m_v1_fallback . GetSendMemoryUsage ( ) ;
2023-07-27 15:10:34 -04:00
return sizeof ( m_send_buffer ) + memusage : : DynamicUsage ( m_send_buffer ) ;
}
2023-06-13 14:20:13 -04:00
std : : pair < size_t , bool > CConnman : : SocketSendData ( CNode & node ) const
2012-11-15 18:04:52 -05:00
{
2021-01-06 08:12:28 +01:00
auto it = node . vSendMsg . begin ( ) ;
2016-05-21 12:04:02 +02:00
size_t nSentSize = 0 ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
bool data_left { false } ; //!< second return value (whether unsent data remains)
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
std : : optional < bool > expected_more ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
while ( true ) {
if ( it ! = node . vSendMsg . end ( ) ) {
// If possible, move one message from the send queue to the transport. This fails when
2023-07-27 15:10:34 -04:00
// there is an existing message still being sent, or (for v2 transports) when the
// handshake has not yet completed.
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
size_t memusage = it - > GetMemoryUsage ( ) ;
if ( node . m_transport - > SetMessageToSend ( * it ) ) {
// Update memory usage of send buffer (as *it will be deleted).
node . m_send_memusage - = memusage ;
+ + it ;
}
}
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
const auto & [ data , more , msg_type ] = node . m_transport - > GetBytesToSend ( it ! = node . vSendMsg . end ( ) ) ;
// We rely on the 'more' value returned by GetBytesToSend to correctly predict whether more
// bytes are still to be sent, to correctly set the MSG_MORE flag. As a sanity check,
// verify that the previously returned 'more' was correct.
if ( expected_more . has_value ( ) ) Assume ( ! data . empty ( ) = = * expected_more ) ;
expected_more = more ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
data_left = ! data . empty ( ) ; // will be overwritten on next loop if all of data gets sent
2017-02-06 14:05:45 -05:00
int nBytes = 0 ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
if ( ! data . empty ( ) ) {
2021-04-23 15:30:46 +02:00
LOCK ( node . m_sock_mutex ) ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
// There is no socket in case we've already disconnected, or in test cases without
// real connections. In these cases, we bail out immediately and just leave things
// in the send queue and transport.
2021-04-23 15:15:23 +02:00
if ( ! node . m_sock ) {
2017-02-06 14:05:45 -05:00
break ;
2021-04-23 15:15:23 +02:00
}
2018-02-23 14:02:37 -05:00
int flags = MSG_NOSIGNAL | MSG_DONTWAIT ;
# ifdef MSG_MORE
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
if ( more ) {
2018-02-23 14:02:37 -05:00
flags | = MSG_MORE ;
}
# endif
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
nBytes = node . m_sock - > Send ( reinterpret_cast < const char * > ( data . data ( ) ) , data . size ( ) , flags ) ;
2017-02-06 14:05:45 -05:00
}
2013-03-24 16:52:24 +01:00
if ( nBytes > 0 ) {
2020-07-10 18:19:11 +02:00
node . m_last_send = GetTime < std : : chrono : : seconds > ( ) ;
2021-01-06 08:12:28 +01:00
node . nSendBytes + = nBytes ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
// Notify transport that bytes have been processed.
node . m_transport - > MarkBytesSent ( nBytes ) ;
// Update statistics per message type.
2023-09-26 18:07:36 -04:00
if ( ! msg_type . empty ( ) ) { // don't report v2 handshake bytes for now
node . AccountForSentBytes ( msg_type , nBytes ) ;
}
2016-05-21 12:04:02 +02:00
nSentSize + = nBytes ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
if ( ( size_t ) nBytes ! = data . size ( ) ) {
2013-03-24 16:52:24 +01:00
// could not send full message; stop sending more
break ;
}
} else {
if ( nBytes < 0 ) {
// error
int nErr = WSAGetLastError ( ) ;
2021-01-06 08:12:28 +01:00
if ( nErr ! = WSAEWOULDBLOCK & & nErr ! = WSAEMSGSIZE & & nErr ! = WSAEINTR & & nErr ! = WSAEINPROGRESS ) {
2020-12-18 07:40:37 +10:00
LogPrint ( BCLog : : NET , " socket send error for peer=%d: %s \n " , node . GetId ( ) , NetworkErrorString ( nErr ) ) ;
2021-01-06 08:12:28 +01:00
node . CloseSocketDisconnect ( ) ;
2013-03-24 16:52:24 +01:00
}
}
break ;
2012-11-15 18:04:52 -05:00
}
}
2013-03-24 16:52:24 +01:00
2023-07-24 13:23:39 -04:00
node . fPauseSend = node . m_send_memusage + node . m_transport - > GetSendMemoryUsage ( ) > nSendBufferMaxSize ;
2021-01-06 08:12:28 +01:00
if ( it = = node . vSendMsg . end ( ) ) {
2023-07-24 13:23:39 -04:00
assert ( node . m_send_memusage = = 0 ) ;
2013-03-24 16:52:24 +01:00
}
2021-01-06 08:12:28 +01:00
node . vSendMsg . erase ( node . vSendMsg . begin ( ) , it ) ;
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
return { nSentSize , data_left } ;
2012-11-15 18:04:52 -05:00
}
2010-08-29 16:58:15 +00:00
2020-09-18 12:47:08 +00:00
/** Try to find a connection to evict when the node is full.
* Extreme care must be taken to avoid opening the node to attacker
* triggered network partitioning .
* The strategy used here is to protect a small number of peers
* for each of several distinct characteristics which are difficult
* to forge . In order to partition a node the attacker must be
* simultaneously better at all of them than honest peers .
*/
bool CConnman : : AttemptToEvictConnection ( )
{
std : : vector < NodeEvictionCandidate > vEvictionCandidates ;
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * node : m_nodes ) {
2020-09-18 12:47:08 +00:00
if ( node - > fDisconnect )
continue ;
2022-03-11 10:14:31 +01:00
NodeEvictionCandidate candidate {
2022-07-01 01:23:02 +02:00
. id = node - > GetId ( ) ,
. m_connected = node - > m_connected ,
. m_min_ping_time = node - > m_min_ping_time ,
. m_last_block_time = node - > m_last_block_time ,
. m_last_tx_time = node - > m_last_tx_time ,
2020-07-20 18:46:13 +01:00
. fRelevantServices = node - > m_has_all_wanted_services ,
2022-07-01 01:23:02 +02:00
. m_relay_txs = node - > m_relays_txs . load ( ) ,
. fBloomFilter = node - > m_bloom_filter_loaded . load ( ) ,
. nKeyedNetGroup = node - > nKeyedNetGroup ,
. prefer_evict = node - > m_prefer_evict ,
. m_is_local = node - > addr . IsLocal ( ) ,
. m_network = node - > ConnectedThroughNetwork ( ) ,
. m_noban = node - > HasPermission ( NetPermissionFlags : : NoBan ) ,
2023-03-24 15:29:21 +01:00
. m_conn_type = node - > m_conn_type ,
2022-03-11 10:14:31 +01:00
} ;
2020-09-18 12:47:08 +00:00
vEvictionCandidates . push_back ( candidate ) ;
}
}
2021-03-15 10:41:30 +08:00
const std : : optional < NodeId > node_id_to_evict = SelectNodeToEvict ( std : : move ( vEvictionCandidates ) ) ;
2020-09-18 12:47:08 +00:00
if ( ! node_id_to_evict ) {
return false ;
}
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( CNode * pnode : m_nodes ) {
2020-09-18 12:47:08 +00:00
if ( pnode - > GetId ( ) = = * node_id_to_evict ) {
2020-12-18 07:18:28 +10:00
LogPrint ( BCLog : : NET , " selected %s connection for eviction peer=%d; disconnecting \n " , pnode - > ConnectionTypeAsString ( ) , pnode - > GetId ( ) ) ;
2017-07-20 11:32:47 +02:00
pnode - > fDisconnect = true ;
2016-04-18 15:58:19 -04:00
return true ;
}
}
return false ;
2015-08-13 02:58:58 -07:00
}
2016-04-16 14:47:18 -04:00
// Accept a pending connection on one of our listening sockets and, on
// success, hand the new socket (plus the permission flags configured for
// that listening socket) to CreateNodeFromAcceptedSocket().
void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
    struct sockaddr_storage sockaddr;
    socklen_t len = sizeof(sockaddr);
    auto sock = hListenSocket.sock->Accept((struct sockaddr*)&sockaddr, &len);
    CAddress addr;

    if (!sock) {
        // Accept failed; EWOULDBLOCK simply means no pending connection.
        const int nErr = WSAGetLastError();
        if (nErr != WSAEWOULDBLOCK) {
            LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr));
        }
        return;
    }

    if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr)) {
        LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "Unknown socket family\n");
    } else {
        // Remap CJDNS addresses that arrive looking like IPv6.
        addr = CAddress{MaybeFlipIPv6toCJDNS(addr), NODE_NONE};
    }

    const CAddress addr_bind{MaybeFlipIPv6toCJDNS(GetBindAddress(*sock)), NODE_NONE};

    NetPermissionFlags permission_flags = NetPermissionFlags::None;
    hListenSocket.AddSocketPermissionFlags(permission_flags);

    CreateNodeFromAcceptedSocket(std::move(sock), permission_flags, addr_bind, addr);
}
2021-04-13 12:14:57 +02:00
// Wrap an already-accepted inbound socket in a CNode and register it.
// Applies whitelist permissions, the ban/discouragement policy and the
// inbound connection limit (with eviction) before constructing the node.
void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
                                            NetPermissionFlags permission_flags,
                                            const CAddress& addr_bind,
                                            const CAddress& addr)
{
    int nInbound = 0;
    int nMaxInbound = nMaxConnections - m_max_outbound;

    AddWhitelistPermissionFlags(permission_flags, addr);
    if (NetPermissions::HasFlag(permission_flags, NetPermissionFlags::Implicit)) {
        // "Implicit" is a placeholder; expand it into the concrete default
        // whitelist permissions, honoring the -whitelist*relay options.
        NetPermissions::ClearFlag(permission_flags, NetPermissionFlags::Implicit);
        if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) NetPermissions::AddFlag(permission_flags, NetPermissionFlags::ForceRelay);
        if (gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) NetPermissions::AddFlag(permission_flags, NetPermissionFlags::Relay);
        NetPermissions::AddFlag(permission_flags, NetPermissionFlags::Mempool);
        NetPermissions::AddFlag(permission_flags, NetPermissionFlags::NoBan);
    }

    {
        // Count current inbound peers under the lock.
        LOCK(m_nodes_mutex);
        for (const CNode* pnode : m_nodes) {
            if (pnode->IsInboundConn()) nInbound++;
        }
    }

    if (!fNetworkActive) {
        LogPrint(BCLog::NET, "connection from %s dropped: not accepting new connections\n", addr.ToStringAddrPort());
        return;
    }

    if (!sock->IsSelectable()) {
        LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToStringAddrPort());
        return;
    }

    // According to the internet TCP_NODELAY is not carried into accepted sockets
    // on all platforms. Set it again here just to be sure.
    const int on{1};
    if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) {
        LogPrint(BCLog::NET, "connection from %s: unable to set TCP_NODELAY, continuing anyway\n",
                 addr.ToStringAddrPort());
    }

    // Don't accept connections from banned peers.
    bool banned = m_banman && m_banman->IsBanned(addr);
    if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && banned)
    {
        LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToStringAddrPort());
        return;
    }

    // Only accept connections from discouraged peers if our inbound slots aren't (almost) full.
    bool discouraged = m_banman && m_banman->IsDiscouraged(addr);
    if (!NetPermissions::HasFlag(permission_flags, NetPermissionFlags::NoBan) && nInbound + 1 >= nMaxInbound && discouraged)
    {
        LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToStringAddrPort());
        return;
    }

    if (nInbound >= nMaxInbound)
    {
        // Slots are full: try to free one by evicting an existing peer.
        if (!AttemptToEvictConnection()) {
            // No connection to evict, disconnect the new connection
            LogPrint(BCLog::NET, "failed to find an eviction candidate - connection dropped (full)\n");
            return;
        }
    }

    NodeId id = GetNewNodeId();
    uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();

    ServiceFlags nodeServices = nLocalServices;
    if (NetPermissions::HasFlag(permission_flags, NetPermissionFlags::BloomFilter)) {
        // Advertise NODE_BLOOM to peers that are explicitly permitted to use it.
        nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM);
    }

    const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
    // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is
    // detected, so use it whenever we signal NODE_P2P_V2.
    const bool use_v2transport(nodeServices & NODE_P2P_V2);

    CNode* pnode = new CNode(id,
                             std::move(sock),
                             addr,
                             CalculateKeyedNetGroup(addr),
                             nonce,
                             addr_bind,
                             /*addrNameIn=*/"",
                             ConnectionType::INBOUND,
                             inbound_onion,
                             CNodeOptions{
                                 .permission_flags = permission_flags,
                                 .prefer_evict = discouraged,
                                 .recv_flood_size = nReceiveFloodSize,
                                 .use_v2transport = use_v2transport,
                             });
    pnode->AddRef();
    m_msgproc->InitializeNode(*pnode, nodeServices);

    LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToStringAddrPort());

    {
        LOCK(m_nodes_mutex);
        m_nodes.push_back(pnode);
    }

    // We received a new connection, harvest entropy from the time (and our peer count)
    RandAddEvent((uint32_t)id);
}
2020-06-02 09:46:41 -07:00
bool CConnman : : AddConnection ( const std : : string & address , ConnectionType conn_type )
{
2023-01-06 11:23:46 +01:00
AssertLockNotHeld ( m_unused_i2p_sessions_mutex ) ;
2021-05-31 22:49:42 +02:00
std : : optional < int > max_connections ;
switch ( conn_type ) {
case ConnectionType : : INBOUND :
case ConnectionType : : MANUAL :
return false ;
case ConnectionType : : OUTBOUND_FULL_RELAY :
max_connections = m_max_outbound_full_relay ;
break ;
case ConnectionType : : BLOCK_RELAY :
max_connections = m_max_outbound_block_relay ;
break ;
// no limit for ADDR_FETCH because -seednode has no limit either
case ConnectionType : : ADDR_FETCH :
break ;
2021-08-23 10:42:39 +01:00
// no limit for FEELER connections since they're short-lived
case ConnectionType : : FEELER :
break ;
2021-05-31 22:49:42 +02:00
} // no default case, so the compiler can warn about missing cases
2020-06-02 09:46:41 -07:00
// Count existing connections
2021-08-28 20:57:52 +02:00
int existing_connections = WITH_LOCK ( m_nodes_mutex ,
2023-03-24 15:29:21 +01:00
return std : : count_if ( m_nodes . begin ( ) , m_nodes . end ( ) , [ conn_type ] ( CNode * node ) { return node - > m_conn_type = = conn_type ; } ) ; ) ;
2020-06-02 09:46:41 -07:00
// Max connections of specified type already exist
2021-05-31 22:49:42 +02:00
if ( max_connections ! = std : : nullopt & & existing_connections > = max_connections ) return false ;
2020-06-02 09:46:41 -07:00
// Max total outbound connections already exist
CSemaphoreGrant grant ( * semOutbound , true ) ;
if ( ! grant ) return false ;
2023-08-21 18:14:52 -04:00
OpenNetworkConnection ( CAddress ( ) , false , std : : move ( grant ) , address . c_str ( ) , conn_type , /*use_v2transport=*/ false ) ;
2020-06-02 09:46:41 -07:00
return true ;
}
2018-09-24 16:36:58 -04:00
// Remove peers flagged with fDisconnect from m_nodes, queue v1-transport
// retries where requested, and delete previously disconnected peers once
// their reference count drops to zero.
void CConnman::DisconnectNodes()
{
    AssertLockNotHeld(m_nodes_mutex);
    AssertLockNotHeld(m_reconnections_mutex);

    // Use a temporary variable to accumulate desired reconnections, so we don't need
    // m_reconnections_mutex while holding m_nodes_mutex.
    decltype(m_reconnections) reconnections_to_add;

    {
        LOCK(m_nodes_mutex);

        if (!fNetworkActive) {
            // Disconnect any connected nodes
            for (CNode* pnode : m_nodes) {
                if (!pnode->fDisconnect) {
                    LogPrint(BCLog::NET, "Network not active, dropping peer=%d\n", pnode->GetId());
                    pnode->fDisconnect = true;
                }
            }
        }

        // Disconnect unused nodes
        // Iterate over a copy since we erase from m_nodes inside the loop.
        std::vector<CNode*> nodes_copy = m_nodes;
        for (CNode* pnode : nodes_copy)
        {
            if (pnode->fDisconnect)
            {
                // remove from m_nodes
                m_nodes.erase(remove(m_nodes.begin(), m_nodes.end(), pnode), m_nodes.end());

                // Add to reconnection list if appropriate. We don't reconnect right here, because
                // the creation of a connection is a blocking operation (up to several seconds),
                // and we don't want to hold up the socket handler thread for that long.
                if (pnode->m_transport->ShouldReconnectV1()) {
                    // Note: grantOutbound is moved into the reconnection entry,
                    // so the Release() below acts on an empty grant.
                    reconnections_to_add.push_back({
                        .addr_connect = pnode->addr,
                        .grant = std::move(pnode->grantOutbound),
                        .destination = pnode->m_dest,
                        .conn_type = pnode->m_conn_type,
                        .use_v2transport = false});
                    LogPrint(BCLog::NET, "retrying with v1 transport protocol for peer=%d\n", pnode->GetId());
                }

                // release outbound grant (if any)
                pnode->grantOutbound.Release();

                // close socket and cleanup
                pnode->CloseSocketDisconnect();

                // update connection count by network
                if (pnode->IsManualOrFullOutboundConn()) --m_network_conn_counts[pnode->addr.GetNetwork()];

                // hold in disconnected pool until all refs are released
                pnode->Release();
                m_nodes_disconnected.push_back(pnode);
            }
        }
    }
    {
        // Delete disconnected nodes
        std::list<CNode*> nodes_disconnected_copy = m_nodes_disconnected;
        for (CNode* pnode : nodes_disconnected_copy)
        {
            // Destroy the object only after other threads have stopped using it.
            if (pnode->GetRefCount() <= 0) {
                m_nodes_disconnected.remove(pnode);
                DeleteNode(pnode);
            }
        }
    }
    {
        // Move entries from reconnections_to_add to m_reconnections.
        LOCK(m_reconnections_mutex);
        m_reconnections.splice(m_reconnections.end(), std::move(reconnections_to_add));
    }
}
void CConnman : : NotifyNumConnectionsChanged ( )
{
2021-08-28 20:57:52 +02:00
size_t nodes_size ;
2018-09-24 16:36:58 -04:00
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
nodes_size = m_nodes . size ( ) ;
2018-09-24 16:36:58 -04:00
}
2021-08-28 20:57:52 +02:00
if ( nodes_size ! = nPrevNodeCount ) {
nPrevNodeCount = nodes_size ;
2021-08-18 13:41:39 +08:00
if ( m_client_interface ) {
2021-08-28 20:57:52 +02:00
m_client_interface - > NotifyNumConnectionsChanged ( nodes_size ) ;
2021-08-18 13:37:27 +08:00
}
2018-09-24 16:36:58 -04:00
}
}
2020-07-10 18:19:11 +02:00
bool CConnman : : ShouldRunInactivityChecks ( const CNode & node , std : : chrono : : seconds now ) const
2021-02-12 10:01:55 +00:00
{
2021-12-13 12:32:28 +01:00
return node . m_connected + m_peer_connect_timeout < now ;
2021-02-12 10:01:55 +00:00
}
2021-01-06 11:12:51 +00:00
// Returns true when the peer should be disconnected for inactivity:
// no traffic at all within the connect timeout, a send/receive stall
// longer than TIMEOUT_INTERVAL, or a version handshake that never finished.
bool CConnman::InactivityCheck(const CNode& node) const
{
    // Tests that see disconnects after using mocktime can start nodes with a
    // large timeout. For example, -peertimeout=999999999.
    const auto now{GetTime<std::chrono::seconds>()};
    // Snapshot the atomics once so all checks below use consistent values.
    const auto last_send{node.m_last_send.load()};
    const auto last_recv{node.m_last_recv.load()};

    if (!ShouldRunInactivityChecks(node, now)) return false;

    // A zero timestamp means no message was ever sent/received on this socket.
    if (last_recv.count() == 0 || last_send.count() == 0) {
        LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d peer=%d\n", count_seconds(m_peer_connect_timeout), last_recv.count() != 0, last_send.count() != 0, node.GetId());
        return true;
    }

    if (now > last_send + TIMEOUT_INTERVAL) {
        LogPrint(BCLog::NET, "socket sending timeout: %is peer=%d\n", count_seconds(now - last_send), node.GetId());
        return true;
    }

    if (now > last_recv + TIMEOUT_INTERVAL) {
        LogPrint(BCLog::NET, "socket receive timeout: %is peer=%d\n", count_seconds(now - last_recv), node.GetId());
        return true;
    }

    if (!node.fSuccessfullyConnected) {
        // Version handshake never completed within the timeout window.
        LogPrint(BCLog::NET, "version handshake timeout peer=%d\n", node.GetId());
        return true;
    }

    return false;
}
2010-08-29 16:58:15 +00:00
2021-05-04 18:37:19 +02:00
// Build the set of sockets (listening sockets plus per-peer sockets) and the
// events (RECV/SEND) to wait for, based on each peer's receive-pause state
// and whether its transport has (or will have) bytes to send.
Sock::EventsPerSock CConnman::GenerateWaitSockets(Span<CNode* const> nodes)
{
    Sock::EventsPerSock events_per_sock;

    // Always watch the listening sockets for incoming connections.
    for (const ListenSocket& hListenSocket : vhListenSocket) {
        events_per_sock.emplace(hListenSocket.sock, Sock::Events{Sock::RECV});
    }

    for (CNode* pnode : nodes) {
        bool select_recv = !pnode->fPauseRecv;
        bool select_send;
        {
            LOCK(pnode->cs_vSend);
            // Sending is possible if either there are bytes to send right now, or if there will be
            // once a potential message from vSendMsg is handed to the transport. GetBytesToSend
            // determines both of these in a single call.
            const auto& [to_send, more, _msg_type] = pnode->m_transport->GetBytesToSend(!pnode->vSendMsg.empty());
            select_send = !to_send.empty() || more;
        }
        if (!select_recv && !select_send) continue;

        LOCK(pnode->m_sock_mutex);
        if (pnode->m_sock) {
            Sock::Event event = (select_send ? Sock::SEND : 0) | (select_recv ? Sock::RECV : 0);
            events_per_sock.emplace(pnode->m_sock, Sock::Events{event});
        }
    }

    return events_per_sock;
}
2018-09-26 21:51:46 -04:00
// One pass of the socket handler: wait for readiness on connected and
// listening sockets, then service connected peers and accept new
// connections.
void CConnman::SocketHandler()
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);

    Sock::EventsPerSock events_per_sock;

    {
        // Snapshot the node list (with refcounts held) for the duration of
        // this scope.
        const NodesSnapshot snap{*this, /*shuffle=*/false};

        const auto timeout = std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS);

        // Check for the readiness of the already connected sockets and the
        // listening sockets in one call ("readiness" as in poll(2) or
        // select(2)). If none are ready, wait for a short while and return
        // empty sets.
        events_per_sock = GenerateWaitSockets(snap.Nodes());
        if (events_per_sock.empty() || !events_per_sock.begin()->first->WaitMany(timeout, events_per_sock)) {
            interruptNet.sleep_for(timeout);
        }

        // Service (send/receive) each of the already connected nodes.
        SocketHandlerConnected(snap.Nodes(), events_per_sock);
    }

    // Accept new connections from listening sockets.
    SocketHandlerListening(events_per_sock);
}
// Service the given connected peers: push pending bytes to the wire, read
// incoming bytes into the peer's receive buffer, handle socket errors, and
// run the per-peer inactivity check.
void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
                                      const Sock::EventsPerSock& events_per_sock)
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);

    for (CNode* pnode : nodes) {
        if (interruptNet)
            return;

        //
        // Receive
        //
        bool recvSet = false;
        bool sendSet = false;
        bool errorSet = false;
        {
            LOCK(pnode->m_sock_mutex);
            if (!pnode->m_sock) {
                continue;
            }
            // Look up which events actually occurred on this peer's socket.
            const auto it = events_per_sock.find(pnode->m_sock);
            if (it != events_per_sock.end()) {
                recvSet = it->second.occurred & Sock::RECV;
                sendSet = it->second.occurred & Sock::SEND;
                errorSet = it->second.occurred & Sock::ERR;
            }
        }

        if (sendSet) {
            // Send data
            auto [bytes_sent, data_left] = WITH_LOCK(pnode->cs_vSend, return SocketSendData(*pnode));
            if (bytes_sent) {
                RecordBytesSent(bytes_sent);

                // If both receiving and (non-optimistic) sending were possible, we first attempt
                // sending. If that succeeds, but does not fully drain the send queue, do not
                // attempt to receive. This avoids needlessly queueing data if the remote peer
                // is slow at receiving data, by means of TCP flow control. We only do this when
                // sending actually succeeded to make sure progress is always made; otherwise a
                // deadlock would be possible when both sides have data to send, but neither is
                // receiving.
                if (data_left) recvSet = false;
            }
        }

        if (recvSet || errorSet)
        {
            // typical socket buffer is 8K-64K
            uint8_t pchBuf[0x10000];
            int nBytes = 0;
            {
                LOCK(pnode->m_sock_mutex);
                if (!pnode->m_sock) {
                    continue;
                }
                nBytes = pnode->m_sock->Recv(pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
            }
            if (nBytes > 0)
            {
                bool notify = false;
                if (!pnode->ReceiveMsgBytes({pchBuf, (size_t)nBytes}, notify)) {
                    // Receive buffer rejected the bytes; drop the peer.
                    pnode->CloseSocketDisconnect();
                }
                RecordBytesRecv(nBytes);
                if (notify) {
                    // Complete message(s) arrived; hand them to the message
                    // handler thread.
                    pnode->MarkReceivedMsgsForProcessing();
                    WakeMessageHandler();
                }
            }
            else if (nBytes == 0)
            {
                // socket closed gracefully
                if (!pnode->fDisconnect) {
                    LogPrint(BCLog::NET, "socket closed for peer=%d\n", pnode->GetId());
                }
                pnode->CloseSocketDisconnect();
            }
            else if (nBytes < 0)
            {
                // error
                int nErr = WSAGetLastError();
                if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                {
                    if (!pnode->fDisconnect) {
                        LogPrint(BCLog::NET, "socket recv error for peer=%d: %s\n", pnode->GetId(), NetworkErrorString(nErr));
                    }
                    pnode->CloseSocketDisconnect();
                }
            }
        }

        if (InactivityCheck(*pnode)) pnode->fDisconnect = true;
    }
}
2021-05-04 18:37:19 +02:00
void CConnman : : SocketHandlerListening ( const Sock : : EventsPerSock & events_per_sock )
2021-10-25 11:03:58 +02:00
{
for ( const ListenSocket & listen_socket : vhListenSocket ) {
if ( interruptNet ) {
return ;
}
2021-05-04 18:37:19 +02:00
const auto it = events_per_sock . find ( listen_socket . sock ) ;
if ( it ! = events_per_sock . end ( ) & & it - > second . occurred & Sock : : RECV ) {
2021-10-25 11:03:58 +02:00
AcceptConnection ( listen_socket ) ;
}
2018-09-24 17:03:17 -04:00
}
}
void CConnman : : ThreadSocketHandler ( )
{
2022-01-25 18:18:52 -03:00
AssertLockNotHeld ( m_total_bytes_sent_mutex ) ;
2018-09-24 17:03:17 -04:00
while ( ! interruptNet )
{
DisconnectNodes ( ) ;
NotifyNumConnectionsChanged ( ) ;
SocketHandler ( ) ;
2010-08-29 16:58:15 +00:00
}
}
2016-12-31 02:05:21 -05:00
void CConnman : : WakeMessageHandler ( )
{
2016-12-31 02:05:26 -05:00
{
2019-05-30 13:44:02 +10:00
LOCK ( mutexMsgProc ) ;
2016-12-31 02:05:26 -05:00
fMsgProcWake = true ;
}
2016-12-31 02:05:21 -05:00
condMsgProc . notify_one ( ) ;
}
2010-08-29 16:58:15 +00:00
2016-04-16 14:47:18 -04:00
void CConnman : : ThreadDNSAddressSeed ( )
2011-11-21 12:25:00 -05:00
{
2019-03-07 15:30:59 -08:00
FastRandomContext rng ;
2023-09-12 13:42:36 +02:00
std : : vector < std : : string > seeds = m_params . DNSSeeds ( ) ;
2019-03-07 15:30:59 -08:00
Shuffle ( seeds . begin ( ) , seeds . end ( ) , rng ) ;
int seeds_right_now = 0 ; // Number of seeds left before testing if we have enough connections
int found = 0 ;
2014-07-29 11:04:46 -04:00
2019-03-07 15:30:59 -08:00
if ( gArgs . GetBoolArg ( " -forcednsseed " , DEFAULT_FORCEDNSSEED ) ) {
// When -forcednsseed is provided, query all.
seeds_right_now = seeds . size ( ) ;
2023-01-13 14:23:38 -08:00
} else if ( addrman . Size ( ) = = 0 ) {
2020-02-11 13:20:21 +10:00
// If we have no known peers, query all.
2020-05-28 10:07:49 +10:00
// This will occur on the first run, or if peers.dat has been
// deleted.
2020-02-11 13:20:21 +10:00
seeds_right_now = seeds . size ( ) ;
2014-07-29 11:04:46 -04:00
}
2020-02-11 13:20:21 +10:00
// goal: only query DNS seed if address need is acute
// * If we have a reasonable number of peers in addrman, spend
// some time trying them first. This improves user privacy by
// creating fewer identifying DNS requests, reduces trust by
// giving seeds less influence on the network topology, and
// reduces traffic to the seeds.
// * When querying DNS seeds query a few at once, this ensures
// that we don't give DNS seeds the ability to eclipse nodes
// that query them.
// * If we continue having problems, eventually query all the
// DNS seeds, and if that fails too, also try the fixed seeds.
// (done in ThreadOpenConnections)
2023-01-13 14:23:38 -08:00
const std : : chrono : : seconds seeds_wait_time = ( addrman . Size ( ) > = DNSSEEDS_DELAY_PEER_THRESHOLD ? DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS ) ;
2020-02-11 13:20:21 +10:00
2019-03-07 15:30:59 -08:00
for ( const std : : string & seed : seeds ) {
2020-02-11 13:20:21 +10:00
if ( seeds_right_now = = 0 ) {
seeds_right_now + = DNSSEEDS_TO_QUERY_AT_ONCE ;
2011-03-08 22:40:50 -05:00
2023-01-13 14:23:38 -08:00
if ( addrman . Size ( ) > 0 ) {
2020-02-11 13:20:21 +10:00
LogPrintf ( " Waiting %d seconds before querying DNS seeds. \n " , seeds_wait_time . count ( ) ) ;
std : : chrono : : seconds to_wait = seeds_wait_time ;
while ( to_wait . count ( ) > 0 ) {
2020-05-28 10:07:49 +10:00
// if sleeping for the MANY_PEERS interval, wake up
// early to see if we have enough peers and can stop
// this thread entirely freeing up its resources
2020-02-11 13:20:21 +10:00
std : : chrono : : seconds w = std : : min ( DNSSEEDS_DELAY_FEW_PEERS , to_wait ) ;
if ( ! interruptNet . sleep_for ( w ) ) return ;
to_wait - = w ;
int nRelevant = 0 ;
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * pnode : m_nodes ) {
2021-05-21 13:03:00 +10:00
if ( pnode - > fSuccessfullyConnected & & pnode - > IsFullOutboundConn ( ) ) + + nRelevant ;
2020-02-11 13:20:21 +10:00
}
}
if ( nRelevant > = 2 ) {
if ( found > 0 ) {
LogPrintf ( " %d addresses found from DNS seeds \n " , found ) ;
LogPrintf ( " P2P peers available. Finished DNS seeding. \n " ) ;
} else {
LogPrintf ( " P2P peers available. Skipped DNS seeding. \n " ) ;
}
return ;
}
}
2019-03-07 15:30:59 -08:00
}
}
2013-01-29 23:13:17 -05:00
2020-02-12 01:32:46 +10:00
if ( interruptNet ) return ;
2020-05-28 10:07:49 +10:00
// hold off on querying seeds if P2P network deactivated
2020-02-12 01:32:46 +10:00
if ( ! fNetworkActive ) {
LogPrintf ( " Waiting for network to be reactivated before querying DNS seeds. \n " ) ;
do {
if ( ! interruptNet . sleep_for ( std : : chrono : : seconds { 1 } ) ) return ;
} while ( ! fNetworkActive ) ;
2017-04-14 16:29:57 -04:00
}
2020-02-11 13:20:21 +10:00
2019-03-07 15:30:59 -08:00
LogPrintf ( " Loading addresses from DNS seed %s \n " , seed ) ;
2020-09-25 12:33:47 -07:00
// If -proxy is in use, we make an ADDR_FETCH connection to the DNS resolved peer address
// for the base dns seed domain in chainparams
2013-01-29 23:13:17 -05:00
if ( HaveNameProxy ( ) ) {
2020-07-17 14:56:34 -07:00
AddAddrFetch ( seed ) ;
2013-01-29 23:13:17 -05:00
} else {
2016-04-15 19:53:45 -04:00
std : : vector < CAddress > vAdd ;
2017-10-04 17:59:30 -04:00
ServiceFlags requiredServiceBits = GetDesirableServiceFlags ( NODE_NONE ) ;
2017-10-19 17:32:45 -04:00
std : : string host = strprintf ( " x%x.%s " , requiredServiceBits , seed ) ;
2017-05-23 20:48:08 -04:00
CNetAddr resolveSource ;
if ( ! resolveSource . SetInternal ( host ) ) {
continue ;
}
2018-03-06 18:26:29 -05:00
unsigned int nMaxIPs = 256 ; // Limits number of IPs learned from a DNS seed
2022-10-07 11:10:35 -03:00
const auto addresses { LookupHost ( host , nMaxIPs , true ) } ;
if ( ! addresses . empty ( ) ) {
for ( const CNetAddr & ip : addresses ) {
2023-09-12 13:42:36 +02:00
CAddress addr = CAddress ( CService ( ip , m_params . GetDefaultPort ( ) ) , requiredServiceBits ) ;
2022-03-28 14:20:04 +02:00
addr . nTime = rng . rand_uniform_delay ( Now < NodeSeconds > ( ) - 3 * 24 h , - 4 * 24 h ) ; // use a random age between 3 and 7 days old
2013-01-29 23:13:17 -05:00
vAdd . push_back ( addr ) ;
found + + ;
2011-05-02 15:34:42 +02:00
}
2017-05-23 20:48:08 -04:00
addrman . Add ( vAdd , resolveSource ) ;
2017-10-19 17:32:45 -04:00
} else {
2020-09-25 12:33:47 -07:00
// If the seed does not support a subdomain with our desired service bits,
// we make an ADDR_FETCH connection to the DNS resolved peer address for the
// base dns seed domain in chainparams
2020-07-17 14:56:34 -07:00
AddAddrFetch ( seed ) ;
2016-04-12 20:38:06 -04:00
}
2011-03-08 22:40:50 -05:00
}
2019-03-07 15:30:59 -08:00
- - seeds_right_now ;
2011-03-08 22:40:50 -05:00
}
2013-09-18 20:38:08 +10:00
LogPrintf ( " %d addresses found from DNS seeds \n " , found ) ;
2011-03-08 22:40:50 -05:00
}
2010-08-29 16:58:15 +00:00
2016-04-16 17:43:11 -04:00
void CConnman : : DumpAddresses ( )
2012-01-04 23:39:45 +01:00
{
2022-06-28 17:50:53 +02:00
const auto start { SteadyClock : : now ( ) } ;
2012-05-16 22:11:19 -04:00
2021-08-21 11:22:21 +02:00
DumpPeerAddresses ( : : gArgs , addrman ) ;
2012-05-16 22:11:19 -04:00
2016-12-25 20:19:40 +00:00
LogPrint ( BCLog : : NET , " Flushed %d addresses to peers.dat %dms \n " ,
2023-01-13 14:23:38 -08:00
addrman . Size ( ) , Ticks < std : : chrono : : milliseconds > ( SteadyClock : : now ( ) - start ) ) ;
2012-01-04 23:39:45 +01:00
}
2010-08-29 16:58:15 +00:00
2020-07-17 14:56:34 -07:00
void CConnman : : ProcessAddrFetch ( )
2012-04-24 02:15:00 +02:00
{
2023-01-06 11:23:46 +01:00
AssertLockNotHeld ( m_unused_i2p_sessions_mutex ) ;
2016-04-15 19:53:45 -04:00
std : : string strDest ;
2012-04-24 02:15:00 +02:00
{
2020-07-17 14:56:34 -07:00
LOCK ( m_addr_fetches_mutex ) ;
if ( m_addr_fetches . empty ( ) )
2012-04-24 02:15:00 +02:00
return ;
2020-07-17 14:56:34 -07:00
strDest = m_addr_fetches . front ( ) ;
m_addr_fetches . pop_front ( ) ;
2012-04-24 02:15:00 +02:00
}
CAddress addr ;
2023-08-21 18:14:52 -04:00
CSemaphoreGrant grant ( * semOutbound , /*fTry=*/ true ) ;
2012-05-10 18:44:07 +02:00
if ( grant ) {
2023-08-21 18:14:52 -04:00
OpenNetworkConnection ( addr , false , std : : move ( grant ) , strDest . c_str ( ) , ConnectionType : : ADDR_FETCH , /*use_v2transport=*/ false ) ;
2012-05-10 18:44:07 +02:00
}
2012-04-24 02:15:00 +02:00
}
2021-04-17 19:17:40 +02:00
bool CConnman : : GetTryNewOutboundPeer ( ) const
2017-10-23 13:36:15 -04:00
{
return m_try_another_outbound_peer ;
}
void CConnman : : SetTryNewOutboundPeer ( bool flag )
{
m_try_another_outbound_peer = flag ;
scripted-diff: remove duplicate categories from LogPrint output
-BEGIN VERIFY SCRIPT-
s() { git grep -l "$1" src | xargs sed -i "s/$1/$2/g"; }
s 'BCLog::TOR, "tor: ' 'BCLog::TOR, "'
s 'BCLog::I2P, "I2P: ' 'BCLog::I2P, "'
s 'BCLog::NET, "net: ' 'BCLog::NET, "'
s 'BCLog::ZMQ, "zmq: ' 'BCLog::ZMQ, "'
s 'BCLog::PRUNE, "Prune: ' 'BCLog::PRUNE, "'
-END VERIFY SCRIPT-
2022-05-24 21:16:39 +02:00
LogPrint ( BCLog : : NET , " setting try another outbound peer=%s \n " , flag ? " true " : " false " ) ;
2017-10-23 13:36:15 -04:00
}
2022-04-01 18:22:53 +02:00
void CConnman : : StartExtraBlockRelayPeers ( )
{
scripted-diff: remove duplicate categories from LogPrint output
-BEGIN VERIFY SCRIPT-
s() { git grep -l "$1" src | xargs sed -i "s/$1/$2/g"; }
s 'BCLog::TOR, "tor: ' 'BCLog::TOR, "'
s 'BCLog::I2P, "I2P: ' 'BCLog::I2P, "'
s 'BCLog::NET, "net: ' 'BCLog::NET, "'
s 'BCLog::ZMQ, "zmq: ' 'BCLog::ZMQ, "'
s 'BCLog::PRUNE, "Prune: ' 'BCLog::PRUNE, "'
-END VERIFY SCRIPT-
2022-05-24 21:16:39 +02:00
LogPrint ( BCLog : : NET , " enabling extra block-relay-only peers \n " ) ;
2022-04-01 18:22:53 +02:00
m_start_extra_block_relay_peers = true ;
}
2017-10-23 13:36:15 -04:00
// Return the number of peers we have over our outbound connection limit
// Exclude peers that are marked for disconnect, or are going to be
2020-09-15 10:29:20 +03:00
// disconnected soon (eg ADDR_FETCH and FEELER)
2017-10-23 13:36:15 -04:00
// Also exclude peers that haven't finished initial connection handshake yet
// (so that we don't decide we're over our desired connection limit, and then
// evict some peer that has finished the handshake)
2021-04-17 19:17:40 +02:00
int CConnman : : GetExtraFullOutboundCount ( ) const
2017-10-23 13:36:15 -04:00
{
2020-09-01 16:32:09 -04:00
int full_outbound_peers = 0 ;
2017-10-23 13:36:15 -04:00
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * pnode : m_nodes ) {
2020-09-01 16:32:09 -04:00
if ( pnode - > fSuccessfullyConnected & & ! pnode - > fDisconnect & & pnode - > IsFullOutboundConn ( ) ) {
+ + full_outbound_peers ;
2017-10-23 13:36:15 -04:00
}
}
}
2020-09-01 16:32:09 -04:00
return std : : max ( full_outbound_peers - m_max_outbound_full_relay , 0 ) ;
2017-10-23 13:36:15 -04:00
}
2021-04-17 19:17:40 +02:00
int CConnman : : GetExtraBlockRelayCount ( ) const
2020-09-01 17:05:47 -04:00
{
int block_relay_peers = 0 ;
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * pnode : m_nodes ) {
2020-09-01 17:05:47 -04:00
if ( pnode - > fSuccessfullyConnected & & ! pnode - > fDisconnect & & pnode - > IsBlockOnlyConn ( ) ) {
+ + block_relay_peers ;
}
}
}
return std : : max ( block_relay_peers - m_max_outbound_block_relay , 0 ) ;
}
2022-11-30 15:55:22 -05:00
std : : unordered_set < Network > CConnman : : GetReachableEmptyNetworks ( ) const
{
std : : unordered_set < Network > networks { } ;
for ( int n = 0 ; n < NET_MAX ; n + + ) {
enum Network net = ( enum Network ) n ;
if ( net = = NET_UNROUTABLE | | net = = NET_INTERNAL ) continue ;
if ( IsReachable ( net ) & & addrman . Size ( net , std : : nullopt ) = = 0 ) {
networks . insert ( net ) ;
}
}
return networks ;
}
2023-02-07 16:03:32 -05:00
bool CConnman : : MultipleManualOrFullOutboundConns ( Network net ) const
{
AssertLockHeld ( m_nodes_mutex ) ;
return m_network_conn_counts [ net ] > 1 ;
}
2023-02-14 17:40:14 -05:00
bool CConnman : : MaybePickPreferredNetwork ( std : : optional < Network > & network )
{
std : : array < Network , 5 > nets { NET_IPV4 , NET_IPV6 , NET_ONION , NET_I2P , NET_CJDNS } ;
Shuffle ( nets . begin ( ) , nets . end ( ) , FastRandomContext ( ) ) ;
LOCK ( m_nodes_mutex ) ;
for ( const auto net : nets ) {
if ( IsReachable ( net ) & & m_network_conn_counts [ net ] = = 0 & & addrman . Size ( net ) ! = 0 ) {
network = net ;
return true ;
}
}
return false ;
}
2017-06-15 09:39:07 +02:00
void CConnman : : ThreadOpenConnections ( const std : : vector < std : : string > connect )
2010-08-29 16:58:15 +00:00
{
2023-01-06 11:23:46 +01:00
AssertLockNotHeld ( m_unused_i2p_sessions_mutex ) ;
2023-08-22 20:42:24 -04:00
AssertLockNotHeld ( m_reconnections_mutex ) ;
2022-05-10 09:08:49 +02:00
FastRandomContext rng ;
2010-08-29 16:58:15 +00:00
// Connect to specific addresses
2017-06-15 09:39:07 +02:00
if ( ! connect . empty ( ) )
2010-08-29 16:58:15 +00:00
{
2013-04-13 00:13:08 -05:00
for ( int64_t nLoop = 0 ; ; nLoop + + )
2010-08-29 16:58:15 +00:00
{
2017-06-15 09:39:07 +02:00
for ( const std : : string & strAddr : connect )
2010-08-29 16:58:15 +00:00
{
2016-06-08 19:12:22 +02:00
CAddress addr ( CService ( ) , NODE_NONE ) ;
2023-08-21 18:14:52 -04:00
OpenNetworkConnection ( addr , false , { } , strAddr . c_str ( ) , ConnectionType : : MANUAL , /*use_v2transport=*/ false ) ;
2010-08-29 16:58:15 +00:00
for ( int i = 0 ; i < 10 & & i < nLoop ; i + + )
{
2016-12-27 17:12:44 -05:00
if ( ! interruptNet . sleep_for ( std : : chrono : : milliseconds ( 500 ) ) )
return ;
2010-08-29 16:58:15 +00:00
}
}
2016-12-27 17:12:44 -05:00
if ( ! interruptNet . sleep_for ( std : : chrono : : milliseconds ( 500 ) ) )
return ;
2010-08-29 16:58:15 +00:00
}
}
// Initiate network connections
2020-09-29 20:19:57 -07:00
auto start = GetTime < std : : chrono : : microseconds > ( ) ;
2016-06-17 00:10:07 -04:00
// Minimum time before next feeler connection (in microseconds).
2020-04-15 19:06:59 -04:00
auto next_feeler = GetExponentialRand ( start , FEELER_INTERVAL ) ;
auto next_extra_block_relay = GetExponentialRand ( start , EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL ) ;
2023-02-14 17:40:14 -05:00
auto next_extra_network_peer { GetExponentialRand ( start , EXTRA_NETWORK_PEER_INTERVAL ) } ;
2020-09-05 09:51:33 -07:00
const bool dnsseed = gArgs . GetBoolArg ( " -dnsseed " , DEFAULT_DNSSEED ) ;
bool add_fixed_seeds = gArgs . GetBoolArg ( " -fixedseeds " , DEFAULT_FIXEDSEEDS ) ;
2023-05-04 14:59:29 -04:00
const bool use_seednodes { gArgs . IsArgSet ( " -seednode " ) } ;
2020-09-05 09:51:33 -07:00
if ( ! add_fixed_seeds ) {
LogPrintf ( " Fixed seeds are disabled \n " ) ;
}
2016-12-27 17:12:44 -05:00
while ( ! interruptNet )
2010-08-29 16:58:15 +00:00
{
2020-07-17 14:56:34 -07:00
ProcessAddrFetch ( ) ;
2012-04-24 02:15:00 +02:00
2016-12-27 17:12:44 -05:00
if ( ! interruptNet . sleep_for ( std : : chrono : : milliseconds ( 500 ) ) )
return ;
2012-02-15 21:17:15 +01:00
2023-08-22 20:42:24 -04:00
PerformReconnections ( ) ;
2012-05-10 18:44:07 +02:00
CSemaphoreGrant grant ( * semOutbound ) ;
2016-12-27 17:12:44 -05:00
if ( interruptNet )
return ;
2010-08-29 16:58:15 +00:00
2022-11-30 15:55:22 -05:00
const std : : unordered_set < Network > fixed_seed_networks { GetReachableEmptyNetworks ( ) } ;
if ( add_fixed_seeds & & ! fixed_seed_networks . empty ( ) ) {
2020-09-05 09:51:33 -07:00
// When the node starts with an empty peers.dat, there are a few other sources of peers before
// we fallback on to fixed seeds: -dnsseed, -seednode, -addnode
// If none of those are available, we fallback on to fixed seeds immediately, else we allow
// 60 seconds for any of those sources to populate addrman.
bool add_fixed_seeds_now = false ;
// It is cheapest to check if enough time has passed first.
if ( GetTime < std : : chrono : : seconds > ( ) > start + std : : chrono : : minutes { 1 } ) {
add_fixed_seeds_now = true ;
2022-11-30 15:55:22 -05:00
LogPrintf ( " Adding fixed seeds as 60 seconds have passed and addrman is empty for at least one reachable network \n " ) ;
2020-09-05 09:51:33 -07:00
}
2023-05-04 14:59:29 -04:00
// Perform cheap checks before locking a mutex.
else if ( ! dnsseed & & ! use_seednodes ) {
LOCK ( m_added_nodes_mutex ) ;
2021-12-28 13:26:20 -08:00
if ( m_added_node_params . empty ( ) ) {
2020-09-05 09:51:33 -07:00
add_fixed_seeds_now = true ;
2023-05-04 14:59:29 -04:00
LogPrintf ( " Adding fixed seeds as -dnsseed=0 (or IPv4/IPv6 connections are disabled via -onlynet) and neither -addnode nor -seednode are provided \n " ) ;
2020-09-05 09:51:33 -07:00
}
}
if ( add_fixed_seeds_now ) {
2023-09-12 13:42:36 +02:00
std : : vector < CAddress > seed_addrs { ConvertSeeds ( m_params . FixedSeeds ( ) ) } ;
2022-09-07 21:30:50 +03:00
// We will not make outgoing connections to peers that are unreachable
// (e.g. because of -onlynet configuration).
// Therefore, we do not add them to addrman in the first place.
2022-11-30 15:55:22 -05:00
// In case previously unreachable networks become reachable
// (e.g. in case of -onlynet changes by the user), fixed seeds will
2023-02-15 14:03:37 -08:00
// be loaded only for networks for which we have no addresses.
2022-07-22 13:54:35 -04:00
seed_addrs . erase ( std : : remove_if ( seed_addrs . begin ( ) , seed_addrs . end ( ) ,
2022-11-30 15:55:22 -05:00
[ & fixed_seed_networks ] ( const CAddress & addr ) { return fixed_seed_networks . count ( addr . GetNetwork ( ) ) = = 0 ; } ) ,
seed_addrs . end ( ) ) ;
2016-05-31 13:05:52 -04:00
CNetAddr local ;
2017-06-21 15:45:20 -04:00
local . SetInternal ( " fixedseeds " ) ;
2022-07-22 13:54:35 -04:00
addrman . Add ( seed_addrs , local ) ;
2020-09-05 09:51:33 -07:00
add_fixed_seeds = false ;
2022-07-22 13:54:35 -04:00
LogPrintf ( " Added %d fixed seeds from reachable networks. \n " , seed_addrs . size ( ) ) ;
2010-08-29 16:58:15 +00:00
}
}
//
// Choose an address to connect to based on most recently seen
//
CAddress addrConnect ;
2023-03-30 20:18:14 +05:30
// Only connect out to one peer per ipv4/ipv6 network group (/16 for IPv4).
2019-03-09 12:55:06 -05:00
int nOutboundFullRelay = 0 ;
int nOutboundBlockRelay = 0 ;
2023-03-30 20:18:14 +05:30
int outbound_privacy_network_peers = 0 ;
2023-04-14 11:01:43 -07:00
std : : set < std : : vector < unsigned char > > outbound_ipv46_peer_netgroups ;
2020-06-02 21:23:44 -07:00
2012-04-06 18:39:12 +02:00
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * pnode : m_nodes ) {
2020-06-02 21:23:44 -07:00
if ( pnode - > IsFullOutboundConn ( ) ) nOutboundFullRelay + + ;
if ( pnode - > IsBlockOnlyConn ( ) ) nOutboundBlockRelay + + ;
2023-03-30 20:18:14 +05:30
// Make sure our persistent outbound slots to ipv4/ipv6 peers belong to different netgroups.
2023-03-24 15:29:21 +01:00
switch ( pnode - > m_conn_type ) {
2021-09-14 14:54:40 +03:00
// We currently don't take inbound connections into account. Since they are
// free to make, an attacker could make them to prevent us from connecting to
// certain peers.
2020-06-02 21:23:44 -07:00
case ConnectionType : : INBOUND :
2021-09-14 14:54:40 +03:00
// Short-lived outbound connections should not affect how we select outbound
// peers from addrman.
case ConnectionType : : ADDR_FETCH :
case ConnectionType : : FEELER :
2020-06-02 21:23:44 -07:00
break ;
2021-09-14 15:19:41 +03:00
case ConnectionType : : MANUAL :
2020-08-11 20:37:32 -07:00
case ConnectionType : : OUTBOUND_FULL_RELAY :
2020-06-02 21:23:44 -07:00
case ConnectionType : : BLOCK_RELAY :
2023-04-14 11:01:43 -07:00
const CAddress address { pnode - > addr } ;
2023-03-30 20:18:14 +05:30
if ( address . IsTor ( ) | | address . IsI2P ( ) | | address . IsCJDNS ( ) ) {
// Since our addrman-groups for these networks are
// random, without relation to the route we
// take to connect to these peers or to the
// difficulty in obtaining addresses with diverse
// groups, we don't worry about diversity with
// respect to our addrman groups when connecting to
// these networks.
+ + outbound_privacy_network_peers ;
} else {
2023-04-14 11:01:43 -07:00
outbound_ipv46_peer_netgroups . insert ( m_netgroupman . GetGroup ( address ) ) ;
2023-03-30 20:18:14 +05:30
}
2020-08-20 15:26:27 -07:00
} // no default case, so the compiler can warn about missing cases
2012-05-10 18:44:07 +02:00
}
2012-04-06 18:39:12 +02:00
}
2010-08-29 16:58:15 +00:00
2020-08-13 21:54:38 -07:00
ConnectionType conn_type = ConnectionType : : OUTBOUND_FULL_RELAY ;
2020-09-29 20:19:57 -07:00
auto now = GetTime < std : : chrono : : microseconds > ( ) ;
2020-06-05 09:38:09 +03:00
bool anchor = false ;
2016-06-17 00:10:07 -04:00
bool fFeeler = false ;
2023-02-14 17:40:14 -05:00
std : : optional < Network > preferred_net ;
2017-10-23 13:36:15 -04:00
2020-08-13 21:54:38 -07:00
// Determine what type of connection to open. Opening
2020-06-05 09:38:09 +03:00
// BLOCK_RELAY connections to addresses from anchors.dat gets the highest
// priority. Then we open OUTBOUND_FULL_RELAY priority until we
2020-08-13 21:54:38 -07:00
// meet our full-relay capacity. Then we open BLOCK_RELAY connection
// until we hit our block-relay-only peer limit.
// GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
// try opening an additional OUTBOUND_FULL_RELAY connection. If none of
2020-09-01 17:05:47 -04:00
// these conditions are met, check to see if it's time to try an extra
2020-09-29 20:19:57 -07:00
// block-relay-only peer (to confirm our tip is current, see below) or the next_feeler
2020-09-01 17:05:47 -04:00
// timer to decide if we should open a FEELER.
2020-08-13 21:54:38 -07:00
2020-06-05 09:38:09 +03:00
if ( ! m_anchors . empty ( ) & & ( nOutboundBlockRelay < m_max_outbound_block_relay ) ) {
conn_type = ConnectionType : : BLOCK_RELAY ;
anchor = true ;
} else if ( nOutboundFullRelay < m_max_outbound_full_relay ) {
2020-08-13 21:54:38 -07:00
// OUTBOUND_FULL_RELAY
} else if ( nOutboundBlockRelay < m_max_outbound_block_relay ) {
conn_type = ConnectionType : : BLOCK_RELAY ;
} else if ( GetTryNewOutboundPeer ( ) ) {
// OUTBOUND_FULL_RELAY
2020-09-29 20:19:57 -07:00
} else if ( now > next_extra_block_relay & & m_start_extra_block_relay_peers ) {
2020-09-01 17:05:47 -04:00
// Periodically connect to a peer (using regular outbound selection
// methodology from addrman) and stay connected long enough to sync
// headers, but not much else.
//
// Then disconnect the peer, if we haven't learned anything new.
//
// The idea is to make eclipse attacks very difficult to pull off,
// because every few minutes we're finding a new peer to learn headers
// from.
//
// This is similar to the logic for trying extra outbound (full-relay)
// peers, except:
2020-04-15 19:06:59 -04:00
// - we do this all the time on an exponential timer, rather than just when
2020-09-01 17:05:47 -04:00
// our tip is stale
// - we potentially disconnect our next-youngest block-relay-only peer, if our
// newest block-relay-only peer delivers a block more recently.
// See the eviction logic in net_processing.cpp.
//
// Because we can promote these connections to block-relay-only
// connections, they do not get their own ConnectionType enum
// (similar to how we deal with extra outbound peers).
2020-04-15 19:06:59 -04:00
next_extra_block_relay = GetExponentialRand ( now , EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL ) ;
2020-09-01 17:05:47 -04:00
conn_type = ConnectionType : : BLOCK_RELAY ;
2020-09-29 20:19:57 -07:00
} else if ( now > next_feeler ) {
2020-04-15 19:06:59 -04:00
next_feeler = GetExponentialRand ( now , FEELER_INTERVAL ) ;
2020-08-13 21:54:38 -07:00
conn_type = ConnectionType : : FEELER ;
fFeeler = true ;
2023-02-14 17:40:14 -05:00
} else if ( nOutboundFullRelay = = m_max_outbound_full_relay & &
m_max_outbound_full_relay = = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS & &
now > next_extra_network_peer & &
MaybePickPreferredNetwork ( preferred_net ) ) {
// Full outbound connection management: Attempt to get at least one
// outbound peer from each reachable network by making extra connections
// and then protecting "only" peers from a network during outbound eviction.
// This is not attempted if the user changed -maxconnections to a value
// so low that less than MAX_OUTBOUND_FULL_RELAY_CONNECTIONS are made,
// to prevent interactions with otherwise protected outbound peers.
next_extra_network_peer = GetExponentialRand ( now , EXTRA_NETWORK_PEER_INTERVAL ) ;
2020-08-13 21:54:38 -07:00
} else {
// skip to next iteration of while loop
continue ;
2016-06-17 00:10:07 -04:00
}
2011-10-03 23:41:47 -04:00
2016-10-27 13:55:39 -04:00
addrman . ResolveCollisions ( ) ;
2022-03-24 19:56:00 +01:00
const auto current_time { NodeClock : : now ( ) } ;
2012-01-04 23:39:45 +01:00
int nTries = 0 ;
2016-12-27 17:12:44 -05:00
while ( ! interruptNet )
2010-08-29 16:58:15 +00:00
{
2020-06-05 09:38:09 +03:00
if ( anchor & & ! m_anchors . empty ( ) ) {
const CAddress addr = m_anchors . back ( ) ;
m_anchors . pop_back ( ) ;
if ( ! addr . IsValid ( ) | | IsLocal ( addr ) | | ! IsReachable ( addr ) | |
! HasAllDesirableServiceFlags ( addr . nServices ) | |
2023-04-14 11:01:43 -07:00
outbound_ipv46_peer_netgroups . count ( m_netgroupman . GetGroup ( addr ) ) ) continue ;
2020-06-05 09:38:09 +03:00
addrConnect = addr ;
2022-07-15 14:13:39 +02:00
LogPrint ( BCLog : : NET , " Trying to make an anchor connection to %s \n " , addrConnect . ToStringAddrPort ( ) ) ;
2020-06-05 09:38:09 +03:00
break ;
}
2020-09-12 18:17:49 +03:00
// If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
// stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
// already-connected network ranges, ...) before trying new addrman addresses.
nTries + + ;
if ( nTries > 100 )
break ;
2021-08-25 15:40:59 -07:00
CAddress addr ;
2022-03-28 14:20:04 +02:00
NodeSeconds addr_last_try { 0 s } ;
2020-10-16 14:05:09 -04:00
if ( fFeeler ) {
// First, try to get a tried table collision address. This returns
// an empty (invalid) address if there are no collisions to try.
2021-08-25 15:40:59 -07:00
std : : tie ( addr , addr_last_try ) = addrman . SelectTriedCollision ( ) ;
2020-10-16 14:05:09 -04:00
if ( ! addr . IsValid ( ) ) {
// No tried table collisions. Select a new table address
// for our feeler.
2021-08-25 15:40:59 -07:00
std : : tie ( addr , addr_last_try ) = addrman . Select ( true ) ;
2020-10-16 14:05:09 -04:00
} else if ( AlreadyConnectedToAddress ( addr ) ) {
// If test-before-evict logic would have us connect to a
// peer that we're already connected to, just mark that
// address as Good(). We won't be able to initiate the
// connection anyway, so this avoids inadvertently evicting
// a currently-connected peer.
addrman . Good ( addr ) ;
// Select a new table address for our feeler instead.
2021-08-25 15:40:59 -07:00
std : : tie ( addr , addr_last_try ) = addrman . Select ( true ) ;
2020-10-16 14:05:09 -04:00
}
} else {
// Not a feeler
2023-02-14 17:40:14 -05:00
// If preferred_net has a value set, pick an extra outbound
// peer from that network. The eviction logic in net_processing
// ensures that a peer from another network will be evicted.
std : : tie ( addr , addr_last_try ) = addrman . Select ( false , preferred_net ) ;
2016-10-27 13:55:39 -04:00
}
2010-08-29 16:58:15 +00:00
2023-04-14 11:01:43 -07:00
// Require outbound IPv4/IPv6 connections, other than feelers, to be to distinct network groups
if ( ! fFeeler & & outbound_ipv46_peer_netgroups . count ( m_netgroupman . GetGroup ( addr ) ) ) {
2023-06-10 08:05:10 -03:00
continue ;
2019-02-26 15:04:48 -05:00
}
2019-03-01 16:15:50 -05:00
// if we selected an invalid or local address, restart
2019-02-26 15:04:48 -05:00
if ( ! addr . IsValid ( ) | | IsLocal ( addr ) ) {
2012-01-04 23:39:45 +01:00
break ;
2019-02-26 15:04:48 -05:00
}
2010-08-29 16:58:15 +00:00
2019-01-09 16:41:37 -08:00
if ( ! IsReachable ( addr ) )
2012-05-04 16:46:22 +02:00
continue ;
2012-01-04 23:39:45 +01:00
// only consider very recently tried nodes after 30 failed attempts
2022-03-24 19:56:00 +01:00
if ( current_time - addr_last_try < 10 min & & nTries < 30 ) {
2012-01-04 23:39:45 +01:00
continue ;
2022-03-28 14:20:04 +02:00
}
2012-01-04 23:39:45 +01:00
2017-10-04 17:59:30 -04:00
// for non-feelers, require all the services we'll want,
// for feelers, only require they be a full node (only because most
// SPV clients don't have a good address DB available)
if ( ! fFeeler & & ! HasAllDesirableServiceFlags ( addr . nServices ) ) {
continue ;
} else if ( fFeeler & & ! MayHaveUsefulAddressDB ( addr . nServices ) ) {
2015-11-17 00:20:49 +01:00
continue ;
2017-05-24 17:00:27 -04:00
}
2015-11-17 00:20:49 +01:00
2021-11-18 09:19:09 +01:00
// Do not connect to bad ports, unless 50 invalid addresses have been selected already.
if ( nTries < 50 & & ( addr . IsIPv4 ( ) | | addr . IsIPv6 ( ) ) & & IsBadPort ( addr . GetPort ( ) ) ) {
2012-01-04 23:39:45 +01:00
continue ;
2021-05-31 17:30:18 +02:00
}
2012-01-04 23:39:45 +01:00
addrConnect = addr ;
break ;
2010-08-29 16:58:15 +00:00
}
2016-06-17 00:10:07 -04:00
if ( addrConnect . IsValid ( ) ) {
if ( fFeeler ) {
// Add small amount of random noise before connection to avoid synchronization.
2022-05-10 09:08:49 +02:00
if ( ! interruptNet . sleep_for ( rng . rand_uniform_duration < CThreadInterrupt : : Clock > ( FEELER_SLEEP_WINDOW ) ) ) {
2016-12-27 17:12:44 -05:00
return ;
2022-05-10 09:08:49 +02:00
}
2022-07-15 14:13:39 +02:00
LogPrint ( BCLog : : NET , " Making feeler connection to %s \n " , addrConnect . ToStringAddrPort ( ) ) ;
2016-06-17 00:10:07 -04:00
}
2023-02-14 17:40:14 -05:00
if ( preferred_net ! = std : : nullopt ) LogPrint ( BCLog : : NET , " Making network specific connection to %s on %s. \n " , addrConnect . ToStringAddrPort ( ) , GetNetworkName ( preferred_net . value ( ) ) ) ;
2023-03-30 20:18:14 +05:30
// Record addrman failure attempts when node has at least 2 persistent outbound connections to peers with
// different netgroups in ipv4/ipv6 networks + all peers in Tor/I2P/CJDNS networks.
// Don't record addrman failure attempts when node is offline. This can be identified since all local
2023-04-14 11:01:43 -07:00
// network connections (if any) belong in the same netgroup, and the size of `outbound_ipv46_peer_netgroups` would only be 1.
const bool count_failures { ( ( int ) outbound_ipv46_peer_netgroups . size ( ) + outbound_privacy_network_peers ) > = std : : min ( nMaxConnections - 1 , 2 ) } ;
2023-08-21 16:55:47 -04:00
// Use BIP324 transport when both us and them have NODE_V2_P2P set.
const bool use_v2transport ( addrConnect . nServices & GetLocalServices ( ) & NODE_P2P_V2 ) ;
2023-08-21 18:14:52 -04:00
OpenNetworkConnection ( addrConnect , count_failures , std : : move ( grant ) , /*strDest=*/ nullptr , conn_type , use_v2transport ) ;
2016-06-17 00:10:07 -04:00
}
2010-08-29 16:58:15 +00:00
}
}
2020-09-12 18:03:06 +03:00
std : : vector < CAddress > CConnman : : GetCurrentBlockRelayOnlyConns ( ) const
{
std : : vector < CAddress > ret ;
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * pnode : m_nodes ) {
2020-09-12 18:03:06 +03:00
if ( pnode - > IsBlockOnlyConn ( ) ) {
ret . push_back ( pnode - > addr ) ;
}
}
return ret ;
}
2021-04-17 19:17:40 +02:00
std : : vector < AddedNodeInfo > CConnman : : GetAddedNodeInfo ( ) const
2011-12-16 19:48:03 -05:00
{
2016-05-28 15:32:30 +02:00
std : : vector < AddedNodeInfo > ret ;
2021-12-28 13:26:20 -08:00
std : : list < AddedNodeParams > lAddresses ( 0 ) ;
2012-07-02 19:55:16 +02:00
{
2021-08-28 20:57:52 +02:00
LOCK ( m_added_nodes_mutex ) ;
2021-12-28 13:26:20 -08:00
ret . reserve ( m_added_node_params . size ( ) ) ;
std : : copy ( m_added_node_params . cbegin ( ) , m_added_node_params . cend ( ) , std : : back_inserter ( lAddresses ) ) ;
2012-07-02 19:55:16 +02:00
}
2011-12-16 19:48:03 -05:00
2016-05-28 15:32:30 +02:00
// Build a map of all already connected addresses (by IP:port and by name) to inbound/outbound and resolved CService
std : : map < CService , bool > mapConnected ;
std : : map < std : : string , std : : pair < bool , CService > > mapConnectedByName ;
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( const CNode * pnode : m_nodes ) {
2016-05-28 15:32:30 +02:00
if ( pnode - > addr . IsValid ( ) ) {
2020-07-28 13:39:38 -07:00
mapConnected [ pnode - > addr ] = pnode - > IsInboundConn ( ) ;
2012-07-02 19:55:16 +02:00
}
2021-08-26 10:39:10 +02:00
std : : string addrName { pnode - > m_addr_name } ;
2017-02-06 12:04:34 -05:00
if ( ! addrName . empty ( ) ) {
2020-07-28 13:39:38 -07:00
mapConnectedByName [ std : : move ( addrName ) ] = std : : make_pair ( pnode - > IsInboundConn ( ) , static_cast < const CService & > ( pnode - > addr ) ) ;
2012-04-19 17:38:03 +02:00
}
}
}
2021-12-28 13:26:20 -08:00
for ( const auto & addr : lAddresses ) {
CService service ( LookupNumeric ( addr . m_added_node , GetDefaultPort ( addr . m_added_node ) ) ) ;
AddedNodeInfo addedNode { addr , CService ( ) , false , false } ;
2016-05-28 15:32:30 +02:00
if ( service . IsValid ( ) ) {
// strAddNode is an IP:port
auto it = mapConnected . find ( service ) ;
if ( it ! = mapConnected . end ( ) ) {
2018-01-26 02:48:56 -08:00
addedNode . resolvedAddress = service ;
addedNode . fConnected = true ;
addedNode . fInbound = it - > second ;
2016-05-28 15:32:30 +02:00
}
} else {
// strAddNode is a name
2021-12-28 13:26:20 -08:00
auto it = mapConnectedByName . find ( addr . m_added_node ) ;
2016-05-28 15:32:30 +02:00
if ( it ! = mapConnectedByName . end ( ) ) {
2018-01-26 02:48:56 -08:00
addedNode . resolvedAddress = it - > second . second ;
addedNode . fConnected = true ;
addedNode . fInbound = it - > second . first ;
2012-04-19 17:38:03 +02:00
}
}
2018-01-26 02:48:56 -08:00
ret . emplace_back ( std : : move ( addedNode ) ) ;
2012-04-19 17:38:03 +02:00
}
2016-05-28 15:32:30 +02:00
return ret ;
}
2016-04-16 14:47:18 -04:00
void CConnman : : ThreadOpenAddedConnections ( )
2016-05-28 15:32:30 +02:00
{
2023-01-06 11:23:46 +01:00
AssertLockNotHeld ( m_unused_i2p_sessions_mutex ) ;
2023-08-22 20:42:24 -04:00
AssertLockNotHeld ( m_reconnections_mutex ) ;
2016-12-11 04:39:26 +00:00
while ( true )
2011-12-16 19:48:03 -05:00
{
2016-12-11 04:39:26 +00:00
CSemaphoreGrant grant ( * semAddnode ) ;
2016-05-28 15:32:30 +02:00
std : : vector < AddedNodeInfo > vInfo = GetAddedNodeInfo ( ) ;
2016-12-11 04:39:26 +00:00
bool tried = false ;
2016-05-28 15:32:30 +02:00
for ( const AddedNodeInfo & info : vInfo ) {
if ( ! info . fConnected ) {
2023-08-21 18:14:52 -04:00
if ( ! grant ) {
2018-03-18 16:26:45 +02:00
// If we've used up our semaphore and need a new one, let's not wait here since while we are waiting
2016-12-11 04:39:26 +00:00
// the addednodeinfo state might change.
break ;
}
tried = true ;
2017-06-23 12:29:50 -04:00
CAddress addr ( CService ( ) , NODE_NONE ) ;
2023-08-21 18:14:52 -04:00
OpenNetworkConnection ( addr , false , std : : move ( grant ) , info . m_params . m_added_node . c_str ( ) , ConnectionType : : MANUAL , info . m_params . m_use_v2transport ) ;
if ( ! interruptNet . sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ) return ;
grant = CSemaphoreGrant ( * semAddnode , /*fTry=*/ true ) ;
2016-05-28 15:32:30 +02:00
}
2012-07-02 19:55:16 +02:00
}
2016-12-11 04:39:26 +00:00
// Retry every 60 seconds if a connection was attempted, otherwise two seconds
2017-01-07 09:49:14 -08:00
if ( ! interruptNet . sleep_for ( std : : chrono : : seconds ( tried ? 60 : 2 ) ) )
2016-12-27 17:12:44 -05:00
return ;
2023-08-22 20:42:24 -04:00
// See if any reconnections are desired.
PerformReconnections ( ) ;
2011-12-16 19:48:03 -05:00
}
}
2012-07-26 00:48:39 +00:00
// if successful, this moves the passed grant to the constructed node
2023-08-21 18:14:52 -04:00
void CConnman : : OpenNetworkConnection ( const CAddress & addrConnect , bool fCountFailure , CSemaphoreGrant & & grant_outbound , const char * pszDest , ConnectionType conn_type , bool use_v2transport )
2010-08-29 16:58:15 +00:00
{
2023-01-06 11:23:46 +01:00
AssertLockNotHeld ( m_unused_i2p_sessions_mutex ) ;
2020-04-29 14:55:59 -07:00
assert ( conn_type ! = ConnectionType : : INBOUND ) ;
2010-08-29 16:58:15 +00:00
//
// Initiate outbound network connection
//
2016-12-27 17:12:44 -05:00
if ( interruptNet ) {
2018-02-01 14:04:49 -05:00
return ;
2016-12-27 17:12:44 -05:00
}
2013-03-26 02:33:25 +01:00
if ( ! fNetworkActive ) {
2018-02-01 14:04:49 -05:00
return ;
2013-03-26 02:33:25 +01:00
}
2014-05-24 11:14:52 +02:00
if ( ! pszDest ) {
2020-06-10 17:11:38 -07:00
bool banned_or_discouraged = m_banman & & ( m_banman - > IsDiscouraged ( addrConnect ) | | m_banman - > IsBanned ( addrConnect ) ) ;
2020-10-16 11:10:17 -04:00
if ( IsLocal ( addrConnect ) | | banned_or_discouraged | | AlreadyConnectedToAddress ( addrConnect ) ) {
2018-02-01 14:04:49 -05:00
return ;
2020-06-10 17:11:38 -07:00
}
2015-05-31 15:44:22 +02:00
} else if ( FindNode ( std : : string ( pszDest ) ) )
2018-02-01 14:04:49 -05:00
return ;
2010-08-29 16:58:15 +00:00
2023-08-21 16:55:47 -04:00
CNode * pnode = ConnectNode ( addrConnect , pszDest , fCountFailure , conn_type , use_v2transport ) ;
2013-03-06 22:31:26 -05:00
2010-08-29 16:58:15 +00:00
if ( ! pnode )
2018-02-01 14:04:49 -05:00
return ;
2023-08-21 18:14:52 -04:00
pnode - > grantOutbound = std : : move ( grant_outbound ) ;
2010-08-29 16:58:15 +00:00
2020-07-20 14:01:05 +01:00
m_msgproc - > InitializeNode ( * pnode , nLocalServices ) ;
2017-01-24 16:51:22 -05:00
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
m_nodes . push_back ( pnode ) ;
2023-06-11 12:26:18 -07:00
// update connection count by network
if ( pnode - > IsManualOrFullOutboundConn ( ) ) + + m_network_conn_counts [ pnode - > addr . GetNetwork ( ) ] ;
2017-01-24 16:51:22 -05:00
}
2010-08-29 16:58:15 +00:00
}
2022-09-07 13:57:18 +10:00
Mutex NetEventsInterface : : g_msgproc_mutex ;
2016-04-16 14:47:18 -04:00
void CConnman : : ThreadMessageHandler ( )
2010-08-29 16:58:15 +00:00
{
2022-09-07 13:57:18 +10:00
LOCK ( NetEventsInterface : : g_msgproc_mutex ) ;
2016-12-27 17:12:44 -05:00
while ( ! flagInterruptMsgProc )
2010-08-29 16:58:15 +00:00
{
2016-12-31 02:05:26 -05:00
bool fMoreWork = false ;
2013-11-15 12:24:34 +01:00
2010-08-29 16:58:15 +00:00
{
2021-04-26 16:22:07 +02:00
// Randomize the order in which we process messages from/to our peers.
// This prevents attacks in which an attacker exploits having multiple
2021-08-28 20:57:52 +02:00
// consecutive connections in the m_nodes list.
2021-04-26 16:22:07 +02:00
const NodesSnapshot snap { * this , /*shuffle=*/ true } ;
2013-03-01 01:41:28 +01:00
2021-04-26 16:22:07 +02:00
for ( CNode * pnode : snap . Nodes ( ) ) {
if ( pnode - > fDisconnect )
continue ;
2017-07-06 13:40:09 -04:00
2021-04-26 16:22:07 +02:00
// Receive messages
bool fMoreNodeWork = m_msgproc - > ProcessMessages ( pnode , flagInterruptMsgProc ) ;
fMoreWork | = ( fMoreNodeWork & & ! pnode - > fPauseSend ) ;
if ( flagInterruptMsgProc )
return ;
// Send messages
2022-09-13 12:22:18 +10:00
m_msgproc - > SendMessages ( pnode ) ;
2010-08-29 16:58:15 +00:00
2021-04-26 16:22:07 +02:00
if ( flagInterruptMsgProc )
return ;
}
2010-08-29 16:58:15 +00:00
}
2013-11-15 12:24:34 +01:00
2017-11-08 17:07:40 -05:00
WAIT_LOCK ( mutexMsgProc , lock ) ;
2016-12-31 02:05:26 -05:00
if ( ! fMoreWork ) {
2019-05-30 13:44:02 +10:00
condMsgProc . wait_until ( lock , std : : chrono : : steady_clock : : now ( ) + std : : chrono : : milliseconds ( 100 ) , [ this ] ( ) EXCLUSIVE_LOCKS_REQUIRED ( mutexMsgProc ) { return fMsgProcWake ; } ) ;
2016-12-27 17:12:44 -05:00
}
2016-12-31 02:05:26 -05:00
fMsgProcWake = false ;
2010-08-29 16:58:15 +00:00
}
}
2020-11-24 11:28:52 +01:00
void CConnman : : ThreadI2PAcceptIncoming ( )
{
static constexpr auto err_wait_begin = 1 s ;
static constexpr auto err_wait_cap = 5 min ;
auto err_wait = err_wait_begin ;
bool advertising_listen_addr = false ;
i2p : : Connection conn ;
while ( ! interruptNet ) {
if ( ! m_i2p_sam_session - > Listen ( conn ) ) {
if ( advertising_listen_addr & & conn . me . IsValid ( ) ) {
RemoveLocal ( conn . me ) ;
advertising_listen_addr = false ;
}
interruptNet . sleep_for ( err_wait ) ;
if ( err_wait < err_wait_cap ) {
err_wait * = 2 ;
}
continue ;
}
if ( ! advertising_listen_addr ) {
2021-05-11 12:44:46 +02:00
AddLocal ( conn . me , LOCAL_MANUAL ) ;
2020-11-24 11:28:52 +01:00
advertising_listen_addr = true ;
}
if ( ! m_i2p_sam_session - > Accept ( conn ) ) {
continue ;
}
2021-04-13 12:14:57 +02:00
CreateNodeFromAcceptedSocket ( std : : move ( conn . sock ) , NetPermissionFlags : : None ,
2020-11-24 11:28:52 +01:00
CAddress { conn . me , NODE_NONE } , CAddress { conn . peer , NODE_NONE } ) ;
}
}
2020-04-11 18:47:17 +03:00
bool CConnman : : BindListenPort ( const CService & addrBind , bilingual_str & strError , NetPermissionFlags permissions )
2010-08-29 16:58:15 +00:00
{
int nOne = 1 ;
// Create socket for listening for incoming connections
2012-05-11 15:28:59 +02:00
struct sockaddr_storage sockaddr ;
socklen_t len = sizeof ( sockaddr ) ;
if ( ! addrBind . GetSockAddr ( ( struct sockaddr * ) & sockaddr , & len ) )
{
2022-07-15 14:13:39 +02:00
strError = strprintf ( Untranslated ( " Bind address family for %s not supported " ) , addrBind . ToStringAddrPort ( ) ) ;
2022-05-25 11:31:58 +02:00
LogPrintLevel ( BCLog : : NET , BCLog : : Level : : Error , " %s \n " , strError . original ) ;
2012-05-11 15:28:59 +02:00
return false ;
}
2020-12-23 16:40:11 +01:00
std : : unique_ptr < Sock > sock = CreateSock ( addrBind ) ;
if ( ! sock ) {
2022-04-18 18:01:24 +08:00
strError = strprintf ( Untranslated ( " Couldn't open socket for incoming connections (socket returned error %s) " ) , NetworkErrorString ( WSAGetLastError ( ) ) ) ;
2022-05-25 11:31:58 +02:00
LogPrintLevel ( BCLog : : NET , BCLog : : Level : : Error , " %s \n " , strError . original ) ;
2010-08-29 16:58:15 +00:00
return false ;
}
2018-01-26 02:48:56 -08:00
2010-08-29 16:58:15 +00:00
// Allow binding if the port is still in TIME_WAIT state after
2015-08-20 15:50:13 -04:00
// the program was closed and restarted.
2021-04-13 13:28:10 +02:00
if ( sock - > SetSockOpt ( SOL_SOCKET , SO_REUSEADDR , ( sockopt_arg_type ) & nOne , sizeof ( int ) ) = = SOCKET_ERROR ) {
strError = strprintf ( Untranslated ( " Error setting SO_REUSEADDR on socket: %s, continuing anyway " ) , NetworkErrorString ( WSAGetLastError ( ) ) ) ;
LogPrintf ( " %s \n " , strError . original ) ;
}
2010-08-29 16:58:15 +00:00
2012-05-11 15:28:59 +02:00
// some systems don't have IPV6_V6ONLY but are always v6only; others do have the option
// and enable it by default or not. Try to enable it, if possible.
if ( addrBind . IsIPv6 ( ) ) {
# ifdef IPV6_V6ONLY
2021-04-13 13:28:10 +02:00
if ( sock - > SetSockOpt ( IPPROTO_IPV6 , IPV6_V6ONLY , ( sockopt_arg_type ) & nOne , sizeof ( int ) ) = = SOCKET_ERROR ) {
strError = strprintf ( Untranslated ( " Error setting IPV6_V6ONLY on socket: %s, continuing anyway " ) , NetworkErrorString ( WSAGetLastError ( ) ) ) ;
LogPrintf ( " %s \n " , strError . original ) ;
}
2013-07-13 13:05:04 +02:00
# endif
2012-05-11 15:28:59 +02:00
# ifdef WIN32
2014-06-24 09:03:18 +02:00
int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED ;
2021-04-13 13:28:10 +02:00
if ( sock - > SetSockOpt ( IPPROTO_IPV6 , IPV6_PROTECTION_LEVEL , ( const char * ) & nProtLevel , sizeof ( int ) ) = = SOCKET_ERROR ) {
strError = strprintf ( Untranslated ( " Error setting IPV6_PROTECTION_LEVEL on socket: %s, continuing anyway " ) , NetworkErrorString ( WSAGetLastError ( ) ) ) ;
LogPrintf ( " %s \n " , strError . original ) ;
}
2012-05-11 15:28:59 +02:00
# endif
}
2021-04-13 16:31:04 +02:00
if ( sock - > Bind ( reinterpret_cast < struct sockaddr * > ( & sockaddr ) , len ) = = SOCKET_ERROR ) {
2010-08-29 16:58:15 +00:00
int nErr = WSAGetLastError ( ) ;
if ( nErr = = WSAEADDRINUSE )
2022-07-15 14:13:39 +02:00
strError = strprintf ( _ ( " Unable to bind to %s on this computer. %s is probably already running. " ) , addrBind . ToStringAddrPort ( ) , PACKAGE_NAME ) ;
2010-08-29 16:58:15 +00:00
else
2022-07-15 14:13:39 +02:00
strError = strprintf ( _ ( " Unable to bind to %s on this computer (bind returned error %s) " ) , addrBind . ToStringAddrPort ( ) , NetworkErrorString ( nErr ) ) ;
2022-05-25 11:31:58 +02:00
LogPrintLevel ( BCLog : : NET , BCLog : : Level : : Error , " %s \n " , strError . original ) ;
2010-08-29 16:58:15 +00:00
return false ;
}
2022-07-15 14:13:39 +02:00
LogPrintf ( " Bound to %s \n " , addrBind . ToStringAddrPort ( ) ) ;
2010-08-29 16:58:15 +00:00
// Listen for incoming connections
2021-04-13 16:43:04 +02:00
if ( sock - > Listen ( SOMAXCONN ) = = SOCKET_ERROR )
2010-08-29 16:58:15 +00:00
{
2022-04-18 18:01:24 +08:00
strError = strprintf ( _ ( " Listening for incoming connections failed (listen returned error %s) " ) , NetworkErrorString ( WSAGetLastError ( ) ) ) ;
2022-05-25 11:31:58 +02:00
LogPrintLevel ( BCLog : : NET , BCLog : : Level : : Error , " %s \n " , strError . original ) ;
2010-08-29 16:58:15 +00:00
return false ;
}
2021-04-23 12:15:15 +02:00
vhListenSocket . emplace_back ( std : : move ( sock ) , permissions ) ;
2010-08-29 16:58:15 +00:00
return true ;
}
2018-02-07 17:42:39 -05:00
void Discover ( )
2010-08-29 16:58:15 +00:00
{
2012-05-24 19:02:21 +02:00
if ( ! fDiscover )
2012-02-19 20:44:35 +01:00
return ;
2010-08-29 16:58:15 +00:00
2011-10-07 11:02:21 -04:00
# ifdef WIN32
2012-07-26 00:48:39 +00:00
// Get local host IP
2014-11-13 15:23:15 +01:00
char pszHostName [ 256 ] = " " ;
2010-08-29 16:58:15 +00:00
if ( gethostname ( pszHostName , sizeof ( pszHostName ) ) ! = SOCKET_ERROR )
{
2022-10-07 11:10:35 -03:00
const std : : vector < CNetAddr > addresses { LookupHost ( pszHostName , 0 , true ) } ;
for ( const CNetAddr & addr : addresses )
2012-05-01 01:44:59 +02:00
{
2022-10-07 11:10:35 -03:00
if ( AddLocal ( addr , LOCAL_IF ) )
LogPrintf ( " %s: %s - %s \n " , __func__ , pszHostName , addr . ToStringAddr ( ) ) ;
2012-05-01 01:44:59 +02:00
}
2010-08-29 16:58:15 +00:00
}
2018-07-19 14:21:05 -04:00
# elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS)
2010-08-29 16:58:15 +00:00
// Get local host ip
struct ifaddrs * myaddrs ;
if ( getifaddrs ( & myaddrs ) = = 0 )
{
2017-08-07 07:36:37 +02:00
for ( struct ifaddrs * ifa = myaddrs ; ifa ! = nullptr ; ifa = ifa - > ifa_next )
2010-08-29 16:58:15 +00:00
{
2017-08-07 07:36:37 +02:00
if ( ifa - > ifa_addr = = nullptr ) continue ;
2010-08-29 16:58:15 +00:00
if ( ( ifa - > ifa_flags & IFF_UP ) = = 0 ) continue ;
if ( strcmp ( ifa - > ifa_name , " lo " ) = = 0 ) continue ;
if ( strcmp ( ifa - > ifa_name , " lo0 " ) = = 0 ) continue ;
if ( ifa - > ifa_addr - > sa_family = = AF_INET )
{
struct sockaddr_in * s4 = ( struct sockaddr_in * ) ( ifa - > ifa_addr ) ;
2012-02-12 13:45:24 +01:00
CNetAddr addr ( s4 - > sin_addr ) ;
2012-03-31 17:58:25 +02:00
if ( AddLocal ( addr , LOCAL_IF ) )
2022-07-15 13:49:17 +02:00
LogPrintf ( " %s: IPv4 %s: %s \n " , __func__ , ifa - > ifa_name , addr . ToStringAddr ( ) ) ;
2010-08-29 16:58:15 +00:00
}
else if ( ifa - > ifa_addr - > sa_family = = AF_INET6 )
{
struct sockaddr_in6 * s6 = ( struct sockaddr_in6 * ) ( ifa - > ifa_addr ) ;
2012-02-12 13:45:24 +01:00
CNetAddr addr ( s6 - > sin6_addr ) ;
2012-03-31 17:58:25 +02:00
if ( AddLocal ( addr , LOCAL_IF ) )
2022-07-15 13:49:17 +02:00
LogPrintf ( " %s: IPv6 %s: %s \n " , __func__ , ifa - > ifa_name , addr . ToStringAddr ( ) ) ;
2010-08-29 16:58:15 +00:00
}
}
freeifaddrs ( myaddrs ) ;
}
# endif
2012-02-19 20:44:35 +01:00
}
2013-03-26 02:33:25 +01:00
void CConnman : : SetNetworkActive ( bool active )
{
2020-07-09 10:12:19 +03:00
LogPrintf ( " %s: %s \n " , __func__ , active ) ;
2013-03-26 02:33:25 +01:00
2017-07-14 15:00:19 +01:00
if ( fNetworkActive = = active ) {
return ;
}
fNetworkActive = active ;
2013-03-26 02:33:25 +01:00
2021-08-18 13:41:39 +08:00
if ( m_client_interface ) {
m_client_interface - > NotifyNetworkActiveChanged ( fNetworkActive ) ;
2021-08-18 13:37:27 +08:00
}
2013-03-26 02:33:25 +01:00
}
2021-08-31 18:40:18 +01:00
CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, AddrMan& addrman_in,
                   const NetGroupManager& netgroupman, const CChainParams& params, bool network_active)
    : addrman(addrman_in)
    , m_netgroupman{netgroupman}
    , nSeed0(nSeed0In)
    , nSeed1(nSeed1In)
    , m_params(params)
{
    SetTryNewOutboundPeer(false);

    // Initialize from a default-constructed Options so all option-derived
    // members have defined values even if Start() is never called.
    Options connOptions;
    Init(connOptions);
    SetNetworkActive(network_active);
}
2016-04-17 20:20:34 -04:00
NodeId CConnman::GetNewNodeId()
{
    // Atomically hand out the next node id; relaxed ordering suffices since
    // only uniqueness of the returned value matters.
    return nLastNodeId.fetch_add(1, std::memory_order_relaxed);
}
2016-04-16 17:43:11 -04:00
2023-09-12 13:42:52 +02:00
uint16_t CConnman : : GetDefaultPort ( Network net ) const
{
return net = = NET_I2P ? I2P_SAM31_PORT : m_params . GetDefaultPort ( ) ;
}
uint16_t CConnman : : GetDefaultPort ( const std : : string & addr ) const
{
CNetAddr a ;
return a . SetSpecial ( addr ) ? GetDefaultPort ( a . GetNetwork ( ) ) : m_params . GetDefaultPort ( ) ;
}
2017-06-01 12:34:02 +02:00
2021-09-13 13:02:05 +02:00
bool CConnman : : Bind ( const CService & addr_ , unsigned int flags , NetPermissionFlags permissions )
{
const CService addr { MaybeFlipIPv6toCJDNS ( addr_ ) } ;
2020-04-11 18:47:17 +03:00
bilingual_str strError ;
2019-06-20 18:37:51 +09:00
if ( ! BindListenPort ( addr , strError , permissions ) ) {
2021-08-18 13:41:39 +08:00
if ( ( flags & BF_REPORT_ERROR ) & & m_client_interface ) {
m_client_interface - > ThreadSafeMessageBox ( strError , " " , CClientUIInterface : : MSG_ERROR ) ;
2017-06-01 12:34:02 +02:00
}
return false ;
}
2020-09-29 18:02:44 +03:00
2021-03-21 22:46:50 +01:00
if ( addr . IsRoutable ( ) & & fDiscover & & ! ( flags & BF_DONT_ADVERTISE ) & & ! NetPermissions : : HasFlag ( permissions , NetPermissionFlags : : NoBan ) ) {
2020-09-29 18:02:44 +03:00
AddLocal ( addr , LOCAL_BIND ) ;
}
2017-06-01 12:34:02 +02:00
return true ;
}
2020-10-22 20:34:31 +02:00
bool CConnman : : InitBinds ( const Options & options )
2019-06-20 18:37:51 +09:00
{
2017-06-01 12:34:02 +02:00
bool fBound = false ;
2020-10-22 20:34:31 +02:00
for ( const auto & addrBind : options . vBinds ) {
2022-07-29 13:23:29 +02:00
fBound | = Bind ( addrBind , BF_REPORT_ERROR , NetPermissionFlags : : None ) ;
2017-06-01 12:34:02 +02:00
}
2020-10-22 20:34:31 +02:00
for ( const auto & addrBind : options . vWhiteBinds ) {
2022-07-29 13:23:29 +02:00
fBound | = Bind ( addrBind . m_service , BF_REPORT_ERROR , addrBind . m_flags ) ;
2017-06-01 12:34:02 +02:00
}
2020-10-22 20:34:31 +02:00
for ( const auto & addr_bind : options . onion_binds ) {
2022-07-29 13:23:29 +02:00
fBound | = Bind ( addr_bind , BF_DONT_ADVERTISE , NetPermissionFlags : : None ) ;
2020-10-22 20:34:31 +02:00
}
if ( options . bind_on_any ) {
2017-06-01 12:34:02 +02:00
struct in_addr inaddr_any ;
2020-09-22 13:42:47 +03:00
inaddr_any . s_addr = htonl ( INADDR_ANY ) ;
2018-07-16 16:29:27 +10:00
struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT ;
2021-03-21 22:46:50 +01:00
fBound | = Bind ( CService ( inaddr6_any , GetListenPort ( ) ) , BF_NONE , NetPermissionFlags : : None ) ;
fBound | = Bind ( CService ( inaddr_any , GetListenPort ( ) ) , ! fBound ? BF_REPORT_ERROR : BF_NONE , NetPermissionFlags : : None ) ;
2017-06-01 12:34:02 +02:00
}
return fBound ;
}
[net] Fix use of uninitialized value in getnetworkinfo(const JSONRPCRequest& request)
When running test_bitcoin under Valgrind I found the following issue:
```
$ valgrind src/test/test_bitcoin
...
==10465== Use of uninitialised value of size 8
==10465== at 0x6D09B61: ??? (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x6D0B1BB: std::ostreambuf_iterator<char, std::char_traits<char> > std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::_M_insert_int<unsigned long>(std::ostreambuf_iterator<char, std::char_traits<char> >, std::ios_base&, char, unsigned long) const (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x6D0B36C: std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::do_put(std::ostreambuf_iterator<char, std::char_traits<char> >, std::ios_base&, char, unsigned long) const (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x6D17699: std::ostream& std::ostream::_M_insert<unsigned long>(unsigned long) (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x4CAAD7: operator<< (ostream:171)
==10465== by 0x4CAAD7: formatValue<ServiceFlags> (tinyformat.h:345)
==10465== by 0x4CAAD7: void tinyformat::detail::FormatArg::formatImpl<ServiceFlags>(std::ostream&, char const*, char const*, int, void const*) (tinyformat.h:523)
==10465== by 0x1924D4: format (tinyformat.h:510)
==10465== by 0x1924D4: tinyformat::detail::formatImpl(std::ostream&, char const*, tinyformat::detail::FormatArg const*, int) (tinyformat.h:803)
==10465== by 0x553A55: vformat (tinyformat.h:947)
==10465== by 0x553A55: format<ServiceFlags> (tinyformat.h:957)
==10465== by 0x553A55: std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > tinyformat::format<ServiceFlags>(char const*, ServiceFlags const&) (tinyformat.h:966)
==10465== by 0x54C952: getnetworkinfo(JSONRPCRequest const&) (net.cpp:462)
==10465== by 0x28EDB5: CallRPC(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) (rpc_tests.cpp:31)
==10465== by 0x293947: rpc_tests::rpc_togglenetwork::test_method() (rpc_tests.cpp:88)
==10465== by 0x2950E5: rpc_tests::rpc_togglenetwork_invoker() (rpc_tests.cpp:84)
==10465== by 0x182496: invoke<void (*)()> (callback.hpp:56)
==10465== by 0x182496: boost::unit_test::ut_detail::callback0_impl_t<boost::unit_test::ut_detail::unused, void (*)()>::invoke() (callback.hpp:89)
...
```
The read of the uninitialized variable nLocalServices is triggered by g_connman->GetLocalServices()
in getnetworkinfo(const JSONRPCRequest& request) (net.cpp:462):
```c++
UniValue getnetworkinfo(const JSONRPCRequest& request)
{
...
if(g_connman)
obj.push_back(Pair("localservices", strprintf("%016x", g_connman->GetLocalServices())));
...
}
```
The reason for the uninitialized nLocalServices is that CConnman::Start(...) is not called
by the tests, and hence the initialization normally performed by CConnman::Start(...) is
not done.
This commit adds a method Init(const Options& connOptions) which is called by both the
constructor and CConnman::Start(...). This method initializes nLocalServices and the other
relevant values from the supplied Options object.
2017-08-02 14:02:36 +02:00
bool CConnman : : Start ( CScheduler & scheduler , const Options & connOptions )
2016-04-17 20:20:34 -04:00
{
2022-01-25 18:18:52 -03:00
AssertLockNotHeld ( m_total_bytes_sent_mutex ) ;
[net] Fix use of uninitialized value in getnetworkinfo(const JSONRPCRequest& request)
When running test_bitcoin under Valgrind I found the following issue:
```
$ valgrind src/test/test_bitcoin
...
==10465== Use of uninitialised value of size 8
==10465== at 0x6D09B61: ??? (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x6D0B1BB: std::ostreambuf_iterator<char, std::char_traits<char> > std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::_M_insert_int<unsigned long>(std::ostreambuf_iterator<char, std::char_traits<char> >, std::ios_base&, char, unsigned long) const (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x6D0B36C: std::num_put<char, std::ostreambuf_iterator<char, std::char_traits<char> > >::do_put(std::ostreambuf_iterator<char, std::char_traits<char> >, std::ios_base&, char, unsigned long) const (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x6D17699: std::ostream& std::ostream::_M_insert<unsigned long>(unsigned long) (in /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.21)
==10465== by 0x4CAAD7: operator<< (ostream:171)
==10465== by 0x4CAAD7: formatValue<ServiceFlags> (tinyformat.h:345)
==10465== by 0x4CAAD7: void tinyformat::detail::FormatArg::formatImpl<ServiceFlags>(std::ostream&, char const*, char const*, int, void const*) (tinyformat.h:523)
==10465== by 0x1924D4: format (tinyformat.h:510)
==10465== by 0x1924D4: tinyformat::detail::formatImpl(std::ostream&, char const*, tinyformat::detail::FormatArg const*, int) (tinyformat.h:803)
==10465== by 0x553A55: vformat (tinyformat.h:947)
==10465== by 0x553A55: format<ServiceFlags> (tinyformat.h:957)
==10465== by 0x553A55: std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > tinyformat::format<ServiceFlags>(char const*, ServiceFlags const&) (tinyformat.h:966)
==10465== by 0x54C952: getnetworkinfo(JSONRPCRequest const&) (net.cpp:462)
==10465== by 0x28EDB5: CallRPC(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) (rpc_tests.cpp:31)
==10465== by 0x293947: rpc_tests::rpc_togglenetwork::test_method() (rpc_tests.cpp:88)
==10465== by 0x2950E5: rpc_tests::rpc_togglenetwork_invoker() (rpc_tests.cpp:84)
==10465== by 0x182496: invoke<void (*)()> (callback.hpp:56)
==10465== by 0x182496: boost::unit_test::ut_detail::callback0_impl_t<boost::unit_test::ut_detail::unused, void (*)()>::invoke() (callback.hpp:89)
...
```
The read of the uninitialized variable nLocalServices is triggered by g_connman->GetLocalServices()
in getnetworkinfo(const JSONRPCRequest& request) (net.cpp:462):
```c++
UniValue getnetworkinfo(const JSONRPCRequest& request)
{
...
if(g_connman)
obj.push_back(Pair("localservices", strprintf("%016x", g_connman->GetLocalServices())));
...
}
```
The reason for the uninitialized nLocalServices is that CConnman::Start(...) is not called
by the tests, and hence the initialization normally performed by CConnman::Start(...) is
not done.
This commit adds a method Init(const Options& connOptions) which is called by both the
constructor and CConnman::Start(...). This method initializes nLocalServices and the other
relevant values from the supplied Options object.
2017-08-02 14:02:36 +02:00
Init ( connOptions ) ;
2020-10-22 20:34:31 +02:00
if ( fListen & & ! InitBinds ( connOptions ) ) {
2021-08-18 13:41:39 +08:00
if ( m_client_interface ) {
m_client_interface - > ThreadSafeMessageBox (
2020-04-11 18:47:17 +03:00
_ ( " Failed to listen on any port. Use -listen=0 if you want this. " ) ,
2017-06-01 12:34:02 +02:00
" " , CClientUIInterface : : MSG_ERROR ) ;
}
return false ;
}
2021-11-08 17:34:32 +01:00
Proxy i2p_sam ;
2022-06-08 17:59:32 +02:00
if ( GetProxy ( NET_I2P , i2p_sam ) & & connOptions . m_i2p_accept_incoming ) {
2021-05-04 13:00:25 +02:00
m_i2p_sam_session = std : : make_unique < i2p : : sam : : Session > ( gArgs . GetDataDirNet ( ) / " i2p_private_key " ,
2020-12-04 18:03:05 +01:00
i2p_sam . proxy , & interruptNet ) ;
}
2017-05-27 12:00:37 +02:00
for ( const auto & strDest : connOptions . vSeedNodes ) {
2020-07-17 14:56:34 -07:00
AddAddrFetch ( strDest ) ;
2017-05-27 12:00:37 +02:00
}
2020-09-12 18:05:54 +03:00
if ( m_use_addrman_outgoing ) {
// Load addresses from anchors.dat
2021-05-04 13:00:25 +02:00
m_anchors = ReadAnchors ( gArgs . GetDataDirNet ( ) / ANCHORS_DATABASE_FILENAME ) ;
2020-09-12 18:05:54 +03:00
if ( m_anchors . size ( ) > MAX_BLOCK_RELAY_ONLY_ANCHORS ) {
m_anchors . resize ( MAX_BLOCK_RELAY_ONLY_ANCHORS ) ;
}
LogPrintf ( " %i block-relay-only anchors will be tried for connections. \n " , m_anchors . size ( ) ) ;
}
2021-08-18 13:41:39 +08:00
if ( m_client_interface ) {
m_client_interface - > InitMessage ( _ ( " Starting network threads… " ) . translated ) ;
2021-08-18 13:37:27 +08:00
}
2016-07-22 16:01:12 +02:00
2014-09-18 14:08:43 +02:00
fAddressesInitialized = true ;
2017-08-07 07:36:37 +02:00
if ( semOutbound = = nullptr ) {
2012-05-10 18:44:07 +02:00
// initialize semaphore
2021-03-10 17:28:08 +08:00
semOutbound = std : : make_unique < CSemaphore > ( std : : min ( m_max_outbound , nMaxConnections ) ) ;
2012-05-10 18:44:07 +02:00
}
2017-08-07 07:36:37 +02:00
if ( semAddnode = = nullptr ) {
2016-12-11 04:39:26 +00:00
// initialize semaphore
2021-03-10 17:28:08 +08:00
semAddnode = std : : make_unique < CSemaphore > ( nMaxAddnode ) ;
2016-12-11 04:39:26 +00:00
}
2012-05-10 18:44:07 +02:00
2010-08-29 16:58:15 +00:00
//
// Start threads
//
2017-07-06 13:40:09 -04:00
assert ( m_msgproc ) ;
2016-12-27 17:13:31 -05:00
InterruptSocks5 ( false ) ;
2016-12-27 17:12:44 -05:00
interruptNet . reset ( ) ;
flagInterruptMsgProc = false ;
2010-08-29 16:58:15 +00:00
2016-12-31 02:05:26 -05:00
{
2017-11-08 17:07:40 -05:00
LOCK ( mutexMsgProc ) ;
2016-12-31 02:05:26 -05:00
fMsgProcWake = false ;
}
2016-12-27 17:11:57 -05:00
// Send and receive from sockets, accept connections
2021-04-13 21:22:52 +03:00
threadSocketHandler = std : : thread ( & util : : TraceThread , " net " , [ this ] { ThreadSocketHandler ( ) ; } ) ;
2010-08-29 16:58:15 +00:00
2020-09-05 09:51:33 -07:00
if ( ! gArgs . GetBoolArg ( " -dnsseed " , DEFAULT_DNSSEED ) )
2013-09-18 20:38:08 +10:00
LogPrintf ( " DNS seeding disabled \n " ) ;
2011-11-21 12:25:00 -05:00
else
2021-04-13 21:22:52 +03:00
threadDNSAddressSeed = std : : thread ( & util : : TraceThread , " dnsseed " , [ this ] { ThreadDNSAddressSeed ( ) ; } ) ;
2010-08-29 16:58:15 +00:00
2020-04-29 14:55:59 -07:00
// Initiate manual connections
2021-04-13 21:22:52 +03:00
threadOpenAddedConnections = std : : thread ( & util : : TraceThread , " addcon " , [ this ] { ThreadOpenAddedConnections ( ) ; } ) ;
2011-12-16 19:48:03 -05:00
2017-06-15 09:39:07 +02:00
if ( connOptions . m_use_addrman_outgoing & & ! connOptions . m_specified_outgoing . empty ( ) ) {
2021-08-18 13:41:39 +08:00
if ( m_client_interface ) {
m_client_interface - > ThreadSafeMessageBox (
2022-02-24 02:59:38 +02:00
_ ( " Cannot provide specific connections and have addrman find outgoing connections at the same time. " ) ,
2017-06-15 09:39:07 +02:00
" " , CClientUIInterface : : MSG_ERROR ) ;
}
return false ;
}
2021-04-13 21:22:52 +03:00
if ( connOptions . m_use_addrman_outgoing | | ! connOptions . m_specified_outgoing . empty ( ) ) {
threadOpenConnections = std : : thread (
& util : : TraceThread , " opencon " ,
[ this , connect = connOptions . m_specified_outgoing ] { ThreadOpenConnections ( connect ) ; } ) ;
}
2010-08-29 16:58:15 +00:00
// Process messages
2021-04-13 21:22:52 +03:00
threadMessageHandler = std : : thread ( & util : : TraceThread , " msghand " , [ this ] { ThreadMessageHandler ( ) ; } ) ;
2010-08-29 16:58:15 +00:00
2022-06-08 17:59:32 +02:00
if ( m_i2p_sam_session ) {
2020-11-24 11:28:52 +01:00
threadI2PAcceptIncoming =
2021-04-13 21:22:52 +03:00
std : : thread ( & util : : TraceThread , " i2paccept " , [ this ] { ThreadI2PAcceptIncoming ( ) ; } ) ;
2020-11-24 11:28:52 +01:00
}
2016-04-16 17:43:11 -04:00
// Dump network addresses
2020-03-06 18:06:50 -05:00
scheduler . scheduleEvery ( [ this ] { DumpAddresses ( ) ; } , DUMP_PEERS_INTERVAL ) ;
2016-04-16 17:43:11 -04:00
2016-04-16 14:47:18 -04:00
return true ;
2010-08-29 16:58:15 +00:00
}
/**
 * RAII helper whose single static instance runs platform network teardown at
 * program exit. On Windows this calls WSACleanup() to undo the Winsock
 * initialization; on other platforms the destructor is a no-op.
 */
class CNetCleanup
{
public:
    CNetCleanup() = default;

    ~CNetCleanup()
    {
#ifdef WIN32
        // Shutdown Windows Sockets
        WSACleanup();
#endif
    }
};
// Static instance: destructor runs during static deinitialization at exit.
static CNetCleanup instance_of_cnetcleanup;
2012-08-13 05:26:30 +02:00
2016-12-27 17:12:44 -05:00
/**
 * Signal all network threads to stop, without joining them (see StopThreads()).
 *
 * Sets the message-processing interrupt flag under mutexMsgProc and wakes the
 * message handler, interrupts the socket-handling loops and any in-flight
 * SOCKS5 handshakes, and posts the connection semaphores enough times that
 * every thread blocked waiting on them can wake up and observe the interrupt.
 */
void CConnman::Interrupt()
{
    {
        LOCK(mutexMsgProc);
        flagInterruptMsgProc = true;
    }
    condMsgProc.notify_all();

    interruptNet();
    InterruptSocks5(true);

    if (semOutbound) {
        // Release every possible waiter on the outbound-connection semaphore.
        for (int i = 0; i < m_max_outbound; i++) {
            semOutbound->post();
        }
    }

    if (semAddnode) {
        // Release every possible waiter on the addnode semaphore.
        for (int i = 0; i < nMaxAddnode; i++) {
            semAddnode->post();
        }
    }
}
2020-03-28 10:44:53 -04:00
void CConnman : : StopThreads ( )
2016-12-27 17:12:44 -05:00
{
2020-11-24 11:28:52 +01:00
if ( threadI2PAcceptIncoming . joinable ( ) ) {
threadI2PAcceptIncoming . join ( ) ;
}
2016-12-27 17:12:44 -05:00
if ( threadMessageHandler . joinable ( ) )
threadMessageHandler . join ( ) ;
if ( threadOpenConnections . joinable ( ) )
threadOpenConnections . join ( ) ;
if ( threadOpenAddedConnections . joinable ( ) )
threadOpenAddedConnections . join ( ) ;
if ( threadDNSAddressSeed . joinable ( ) )
threadDNSAddressSeed . join ( ) ;
if ( threadSocketHandler . joinable ( ) )
threadSocketHandler . join ( ) ;
2020-03-28 10:44:53 -04:00
}
2016-04-16 14:47:18 -04:00
2020-03-28 10:44:53 -04:00
/**
 * Tear down all peer state as part of shutdown.
 *
 * If addresses were initialized, persist addrman to disk and — when addrman
 * outgoing connections are in use — dump up to MAX_BLOCK_RELAY_ONLY_ANCHORS
 * current block-relay-only peers as anchors for the next start.
 * Then close and delete every connected and disconnected node, and release
 * listen sockets and connection semaphores.
 */
void CConnman::StopNodes()
{
    if (fAddressesInitialized) {
        DumpAddresses();
        fAddressesInitialized = false;

        if (m_use_addrman_outgoing) {
            // Anchor connections are only dumped during clean shutdown.
            std::vector<CAddress> anchors_to_dump = GetCurrentBlockRelayOnlyConns();
            if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
                anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
            }
            DumpAnchors(gArgs.GetDataDirNet() / ANCHORS_DATABASE_FILENAME, anchors_to_dump);
        }
    }

    // Delete peer connections.
    // Swap the node list out under the lock so the (potentially slow) close
    // and delete work happens without holding m_nodes_mutex.
    std::vector<CNode*> nodes;
    WITH_LOCK(m_nodes_mutex, nodes.swap(m_nodes));
    for (CNode* pnode : nodes) {
        pnode->CloseSocketDisconnect();
        DeleteNode(pnode);
    }

    for (CNode* pnode : m_nodes_disconnected) {
        DeleteNode(pnode);
    }
    m_nodes_disconnected.clear();
    vhListenSocket.clear();
    semOutbound.reset();
    semAddnode.reset();
}
2016-05-24 18:59:16 -04:00
/**
 * Finalize and free a CNode. Gives the message processor a chance to release
 * its per-peer state (FinalizeNode) before the object is destroyed.
 * @param pnode node to delete; must be non-null.
 */
void CConnman::DeleteNode(CNode* pnode)
{
    assert(pnode);
    m_msgproc->FinalizeNode(*pnode);
    delete pnode;
}
2016-04-16 14:47:18 -04:00
/**
 * Destructor: perform a full shutdown in case the caller has not already done
 * so — signal all threads to stop, then join them and free all peer state.
 */
CConnman::~CConnman()
{
    Interrupt();
    Stop();
}
2012-08-13 05:26:30 +02:00
2021-05-02 19:05:42 +02:00
std : : vector < CAddress > CConnman : : GetAddresses ( size_t max_addresses , size_t max_pct , std : : optional < Network > network ) const
2016-04-16 17:43:11 -04:00
{
2021-05-02 19:05:42 +02:00
std : : vector < CAddress > addresses = addrman . GetAddr ( max_addresses , max_pct , network ) ;
2020-07-04 11:25:51 +03:00
if ( m_banman ) {
addresses . erase ( std : : remove_if ( addresses . begin ( ) , addresses . end ( ) ,
[ this ] ( const CAddress & addr ) { return m_banman - > IsDiscouraged ( addr ) | | m_banman - > IsBanned ( addr ) ; } ) ,
addresses . end ( ) ) ;
}
return addresses ;
2016-04-16 17:43:11 -04:00
}
2020-08-11 12:41:26 +03:00
/**
 * Return addresses for a GETADDR-style request from a peer, using a per-
 * (network, local-address, port) response cache to limit how much addrman
 * state a peer can scrape over time (privacy protection).
 *
 * The cache key deliberately groups requestors so that repeated requests from
 * the same vantage point receive the same answer until the entry expires.
 */
std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addresses, size_t max_pct)
{
    auto local_socket_bytes = requestor.addrBind.GetAddrBytes();
    uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE)
                            .Write(requestor.ConnectedThroughNetwork())
                            .Write(local_socket_bytes)
                            // For outbound connections, the port of the bound address is randomly
                            // assigned by the OS and would therefore not be useful for seeding.
                            .Write(requestor.IsInboundConn() ? requestor.addrBind.GetPort() : 0)
                            .Finalize();
    const auto current_time = GetTime<std::chrono::microseconds>();
    // emplace() either inserts a fresh (expired) entry or finds the existing one.
    auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{});
    CachedAddrResponse& cache_entry = r.first->second;
    if (cache_entry.m_cache_entry_expiration < current_time) { // If emplace() added new one it has expiration 0.
        cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct, /*network=*/std::nullopt);
        // Choosing a proper cache lifetime is a trade-off between the privacy leak minimization
        // and the usefulness of ADDR responses to honest users.
        //
        // Longer cache lifetime makes it more difficult for an attacker to scrape
        // enough AddrMan data to maliciously infer something useful.
        // By the time an attacker scraped enough AddrMan records, most of
        // the records should be old enough to not leak topology info by
        // e.g. analyzing real-time changes in timestamps.
        //
        // It takes only several hundred requests to scrape everything from an AddrMan containing 100,000 nodes,
        // so ~24 hours of cache lifetime indeed makes the data less inferable by the time
        // most of it could be scraped (considering that timestamps are updated via
        // ADDR self-announcements and when nodes communicate).
        // We also should be robust to those attacks which may not require scraping *full* victim's AddrMan
        // (because even several timestamps of the same handful of nodes may leak privacy).
        //
        // On the other hand, longer cache lifetime makes ADDR responses
        // outdated and less useful for an honest requestor, e.g. if most nodes
        // in the ADDR response are no longer active.
        //
        // However, the churn in the network is known to be rather low. Since we consider
        // nodes to be "terrible" (see IsTerrible()) if the timestamps are older than 30 days,
        // max. 24 hours of "penalty" due to cache shouldn't make any meaningful difference
        // in terms of the freshness of the response.
        cache_entry.m_cache_entry_expiration = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6));
    }
    return cache_entry.m_addrs_response_cache;
}
2021-12-28 13:26:20 -08:00
bool CConnman : : AddNode ( const AddedNodeParams & add )
2016-04-16 18:12:58 -04:00
{
2021-08-28 20:57:52 +02:00
LOCK ( m_added_nodes_mutex ) ;
2021-12-28 13:26:20 -08:00
for ( const auto & it : m_added_node_params ) {
if ( add . m_added_node = = it . m_added_node ) return false ;
2016-04-16 18:12:58 -04:00
}
2021-12-28 13:26:20 -08:00
m_added_node_params . push_back ( add ) ;
2016-04-16 18:12:58 -04:00
return true ;
}
bool CConnman : : RemoveAddedNode ( const std : : string & strNode )
{
2021-08-28 20:57:52 +02:00
LOCK ( m_added_nodes_mutex ) ;
2021-12-28 13:26:20 -08:00
for ( auto it = m_added_node_params . begin ( ) ; it ! = m_added_node_params . end ( ) ; + + it ) {
if ( strNode = = it - > m_added_node ) {
m_added_node_params . erase ( it ) ;
2016-04-16 18:12:58 -04:00
return true ;
}
}
return false ;
}
2021-04-17 19:17:40 +02:00
size_t CConnman : : GetNodeCount ( ConnectionDirection flags ) const
2016-04-16 18:30:03 -04:00
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
2019-10-16 17:37:19 +00:00
if ( flags = = ConnectionDirection : : Both ) // Shortcut if we want total
2021-08-28 20:57:52 +02:00
return m_nodes . size ( ) ;
2016-04-16 18:30:03 -04:00
int nNum = 0 ;
2021-08-28 20:57:52 +02:00
for ( const auto & pnode : m_nodes ) {
2019-10-16 17:37:19 +00:00
if ( flags & ( pnode - > IsInboundConn ( ) ? ConnectionDirection : : In : ConnectionDirection : : Out ) ) {
2016-04-16 18:30:03 -04:00
nNum + + ;
2017-07-20 11:32:47 +02:00
}
}
2016-04-16 18:30:03 -04:00
return nNum ;
}
2023-04-03 15:42:15 -03:00
/**
 * Map an address to its autonomous system number via the netgroup manager
 * (asmap). Delegates directly to NetGroupManager::GetMappedAS.
 */
uint32_t CConnman::GetMappedAS(const CNetAddr& addr) const
{
    return m_netgroupman.GetMappedAS(addr);
}
2021-04-17 19:17:40 +02:00
void CConnman : : GetNodeStats ( std : : vector < CNodeStats > & vstats ) const
2016-04-16 18:30:03 -04:00
{
vstats . clear ( ) ;
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
vstats . reserve ( m_nodes . size ( ) ) ;
for ( CNode * pnode : m_nodes ) {
2017-02-06 11:44:38 -05:00
vstats . emplace_back ( ) ;
2021-09-01 11:24:46 +01:00
pnode - > CopyStats ( vstats . back ( ) ) ;
2023-04-03 15:42:15 -03:00
vstats . back ( ) . m_mapped_as = GetMappedAS ( pnode - > addr ) ;
2016-04-16 18:30:03 -04:00
}
}
/**
 * Flag the peer with the given address string for disconnection.
 * The actual disconnect happens asynchronously in the socket handler thread.
 *
 * @return true if a matching peer was found and flagged.
 */
bool CConnman::DisconnectNode(const std::string& strNode)
{
    LOCK(m_nodes_mutex);
    if (CNode* pnode = FindNode(strNode)) {
        // Only include the address in the log when IP logging is enabled.
        LogPrint(BCLog::NET, "disconnect by address%s matched peer=%d; disconnecting\n", (fLogIPs ? strprintf("=%s", strNode) : ""), pnode->GetId());
        pnode->fDisconnect = true;
        return true;
    }
    return false;
}
2017-10-04 18:25:34 -04:00
/**
 * Flag every peer whose address falls inside the given subnet for
 * disconnection.
 *
 * @return true if at least one peer matched.
 */
bool CConnman::DisconnectNode(const CSubNet& subnet)
{
    bool disconnected = false;
    LOCK(m_nodes_mutex);
    for (CNode* pnode : m_nodes) {
        if (subnet.Match(pnode->addr)) {
            // Only include the subnet in the log when IP logging is enabled.
            LogPrint(BCLog::NET, "disconnect by subnet%s matched peer=%d; disconnecting\n", (fLogIPs ? strprintf("=%s", subnet.ToString()) : ""), pnode->GetId());
            pnode->fDisconnect = true;
            disconnected = true;
        }
    }
    return disconnected;
}
/**
 * Flag every peer at exactly this address for disconnection
 * (a single address is a degenerate one-host subnet).
 */
bool CConnman::DisconnectNode(const CNetAddr& addr)
{
    return DisconnectNode(CSubNet(addr));
}
2016-04-16 18:30:03 -04:00
bool CConnman : : DisconnectNode ( NodeId id )
{
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( CNode * pnode : m_nodes ) {
2017-04-11 12:13:55 -04:00
if ( id = = pnode - > GetId ( ) ) {
2020-12-18 07:18:28 +10:00
LogPrint ( BCLog : : NET , " disconnect by id peer=%d; disconnecting \n " , pnode - > GetId ( ) ) ;
2016-04-16 18:30:03 -04:00
pnode - > fDisconnect = true ;
return true ;
}
}
return false ;
}
2016-04-18 21:44:42 -04:00
/**
 * Account received bytes in the global receive counter.
 * nTotalBytesRecv is atomic-like in usage here — NOTE(review): no lock is
 * taken, presumably the member is an std::atomic; confirm in the header.
 */
void CConnman::RecordBytesRecv(uint64_t bytes)
{
    nTotalBytesRecv += bytes;
}
2016-04-18 21:44:42 -04:00
/**
 * Account sent bytes in the global send counter and in the current
 * upload-target cycle, resetting the cycle if its timeframe has elapsed.
 * Takes m_total_bytes_sent_mutex; callers must not already hold it.
 */
void CConnman::RecordBytesSent(uint64_t bytes)
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);

    nTotalBytesSent += bytes;

    const auto now = GetTime<std::chrono::seconds>();
    if (nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME < now)
    {
        // timeframe expired, reset cycle
        nMaxOutboundCycleStartTime = now;
        nMaxOutboundTotalBytesSentInCycle = 0;
    }

    nMaxOutboundTotalBytesSentInCycle += bytes;
}
2021-04-17 19:17:40 +02:00
/**
 * @return the configured outbound upload target in bytes per cycle
 *         (0 means no limit). Reads the value under m_total_bytes_sent_mutex.
 */
uint64_t CConnman::GetMaxOutboundTarget() const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    return nMaxOutboundLimit;
}
2021-04-17 19:17:40 +02:00
/**
 * @return the fixed length of one upload-target measurement cycle.
 */
std::chrono::seconds CConnman::GetMaxOutboundTimeframe() const
{
    return MAX_UPLOAD_TIMEFRAME;
}
2021-04-17 19:17:40 +02:00
/**
 * Public, locking wrapper around GetMaxOutboundTimeLeftInCycle_().
 * @return time remaining in the current upload-target cycle.
 */
std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle() const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    return GetMaxOutboundTimeLeftInCycle_();
}
/**
 * Internal helper: time remaining in the current upload-target cycle.
 * Caller must hold m_total_bytes_sent_mutex.
 *
 * @return 0s when no upload limit is configured or the cycle has already
 *         ended; the full timeframe when no cycle has started yet.
 */
std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle_() const
{
    AssertLockHeld(m_total_bytes_sent_mutex);

    // No limit configured: the cycle concept is inactive.
    if (nMaxOutboundLimit == 0)
        return 0s;

    // No bytes recorded yet, so no cycle has started.
    if (nMaxOutboundCycleStartTime.count() == 0)
        return MAX_UPLOAD_TIMEFRAME;

    const std::chrono::seconds cycleEndTime = nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME;
    const auto now = GetTime<std::chrono::seconds>();
    return (cycleEndTime < now) ? 0s : cycleEndTime - now;
}
2021-04-17 19:17:40 +02:00
/**
 * Check whether the configured outbound upload target has been reached for
 * the current cycle.
 *
 * @param historicalBlockServingLimit when true, reserve headroom so each
 *        block can still be relayed once: treat the target as reached when
 *        the remaining budget is smaller than one max-size block per
 *        remaining 10-minute interval in the cycle.
 * @return true if sending should be throttled; always false when no limit
 *         is configured.
 */
bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    if (nMaxOutboundLimit == 0)
        return false;

    if (historicalBlockServingLimit)
    {
        // keep a large enough buffer to at least relay each block once
        const std::chrono::seconds timeLeftInCycle = GetMaxOutboundTimeLeftInCycle_();
        const uint64_t buffer = timeLeftInCycle / std::chrono::minutes{10} * MAX_BLOCK_SERIALIZED_SIZE;
        if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer)
            return true;
    }
    else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit)
        return true;

    return false;
}
2021-04-17 19:17:40 +02:00
/**
 * @return bytes still available under the upload target in the current cycle;
 *         0 when the target is exhausted, and also 0 when no limit is set
 *         (callers distinguish via GetMaxOutboundTarget()).
 */
uint64_t CConnman::GetOutboundTargetBytesLeft() const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    if (nMaxOutboundLimit == 0)
        return 0;

    return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) ? 0 : nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle;
}
2021-04-17 19:17:40 +02:00
/**
 * @return total bytes received over all connections since startup.
 * NOTE(review): read without a lock, presumably nTotalBytesRecv is atomic —
 * confirm against the member declaration.
 */
uint64_t CConnman::GetTotalBytesRecv() const
{
    return nTotalBytesRecv;
}
2021-04-17 19:17:40 +02:00
/**
 * @return total bytes sent over all connections since startup.
 *         Reads the counter under m_total_bytes_sent_mutex.
 */
uint64_t CConnman::GetTotalBytesSent() const
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    LOCK(m_total_bytes_sent_mutex);
    return nTotalBytesSent;
}
2013-10-28 16:28:00 +10:00
2016-04-19 00:04:58 -04:00
/**
 * @return the service flags this node advertises to peers.
 */
ServiceFlags CConnman::GetLocalServices() const
{
    return nLocalServices;
}
2023-08-21 16:55:47 -04:00
static std : : unique_ptr < Transport > MakeTransport ( NodeId id , bool use_v2transport , bool inbound ) noexcept
{
if ( use_v2transport ) {
return std : : make_unique < V2Transport > ( id , /*initiating=*/ ! inbound , SER_NETWORK , INIT_PROTO_VERSION ) ;
} else {
return std : : make_unique < V1Transport > ( id , SER_NETWORK , INIT_PROTO_VERSION ) ;
}
}
2022-06-08 17:26:24 +02:00
/**
 * Construct a peer connection object.
 *
 * Note: member initializers below run in member *declaration* order; keep
 * this list in sync with the declarations in the header.
 */
CNode::CNode(NodeId idIn,
             std::shared_ptr<Sock> sock,
             const CAddress& addrIn,
             uint64_t nKeyedNetGroupIn,
             uint64_t nLocalHostNonceIn,
             const CAddress& addrBindIn,
             const std::string& addrNameIn,
             ConnectionType conn_type_in,
             bool inbound_onion,
             CNodeOptions&& node_opts)
    : m_transport{MakeTransport(idIn, node_opts.use_v2transport, conn_type_in == ConnectionType::INBOUND)},
      m_permission_flags{node_opts.permission_flags},
      m_sock{sock},
      m_connected{GetTime<std::chrono::seconds>()},
      addr{addrIn},
      addrBind{addrBindIn},
      // Fall back to the textual form of the address when no name was given.
      m_addr_name{addrNameIn.empty() ? addr.ToStringAddrPort() : addrNameIn},
      m_dest(addrNameIn),
      m_inbound_onion{inbound_onion},
      m_prefer_evict{node_opts.prefer_evict},
      nKeyedNetGroup{nKeyedNetGroupIn},
      m_conn_type{conn_type_in},
      id{idIn},
      nLocalHostNonce{nLocalHostNonceIn},
      m_recv_flood_size{node_opts.recv_flood_size},
      m_i2p_sam_session{std::move(node_opts.i2p_sam_session)}
{
    // Inbound onion connections must be typed INBOUND.
    if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND);

    // Initialize per-message-type receive accounting, plus a bucket for
    // unknown message types.
    for (const std::string& msg : getAllNetMessageTypes())
        mapRecvBytesPerMsgType[msg] = 0;
    mapRecvBytesPerMsgType[NET_MESSAGE_TYPE_OTHER] = 0;

    if (fLogIPs) {
        LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", m_addr_name, id);
    } else {
        LogPrint(BCLog::NET, "Added connection peer=%d\n", id);
    }
}
2023-03-24 15:45:50 +01:00
void CNode : : MarkReceivedMsgsForProcessing ( )
2023-03-14 17:38:46 +01:00
{
2023-03-14 18:24:58 +01:00
AssertLockNotHeld ( m_msg_process_queue_mutex ) ;
2023-03-14 17:38:46 +01:00
size_t nSizeAdded = 0 ;
for ( const auto & msg : vRecvMsg ) {
// vRecvMsg contains only completed CNetMessage
// the single possible partially deserialized message are held by TransportDeserializer
nSizeAdded + = msg . m_raw_message_size ;
}
2023-03-14 18:24:58 +01:00
LOCK ( m_msg_process_queue_mutex ) ;
m_msg_process_queue . splice ( m_msg_process_queue . end ( ) , vRecvMsg ) ;
m_msg_process_queue_size + = nSizeAdded ;
2023-03-24 15:45:50 +01:00
fPauseRecv = m_msg_process_queue_size > m_recv_flood_size ;
2023-03-14 17:38:46 +01:00
}
2023-03-24 15:45:50 +01:00
/**
 * Pop one message from the processing queue.
 *
 * @return nullopt when the queue is empty; otherwise the front message
 *         together with a flag indicating whether more messages remain.
 */
std::optional<std::pair<CNetMessage, bool>> CNode::PollMessage()
{
    LOCK(m_msg_process_queue_mutex);
    if (m_msg_process_queue.empty()) return std::nullopt;

    std::list<CNetMessage> msgs;
    // Just take one message
    msgs.splice(msgs.begin(), m_msg_process_queue, m_msg_process_queue.begin());
    m_msg_process_queue_size -= msgs.front().m_raw_message_size;
    // Recompute the flood-control pause flag after shrinking the queue.
    fPauseRecv = m_msg_process_queue_size > m_recv_flood_size;

    return std::make_pair(std::move(msgs.front()), !m_msg_process_queue.empty());
}
2017-01-20 20:34:57 -05:00
/**
 * @return true if the node pointer is non-null, the version handshake has
 *         completed, and the peer is not flagged for disconnection.
 */
bool CConnman::NodeFullyConnected(const CNode* pnode)
{
    return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect;
}
2016-11-10 20:17:30 -05:00
/**
 * Queue a serialized message for sending to a peer.
 *
 * The message is appended to the peer's vSendMsg queue under cs_vSend; wire
 * encoding is performed later by the transport in SocketSendData. If the
 * send queue was empty and the transport reports it can produce bytes, an
 * "optimistic" send is attempted immediately from the calling thread to
 * avoid waiting for the socket-handler poll loop.
 *
 * @param pnode destination peer (must be non-null).
 * @param msg   message to send; consumed (moved from).
 */
void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
    AssertLockNotHeld(m_total_bytes_sent_mutex);
    size_t nMessageSize = msg.data.size();
    LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", msg.m_type, nMessageSize, pnode->GetId());
    if (gArgs.GetBoolArg("-capturemessages", false)) {
        CaptureMessage(pnode->addr, msg.m_type, msg.data, /*is_incoming=*/false);
    }

    // Emit a tracepoint for observability tooling.
    TRACE6(net, outbound_message,
        pnode->GetId(),
        pnode->m_addr_name.c_str(),
        pnode->ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.data.size(),
        msg.data.data()
    );

    size_t nBytesSent = 0;
    {
        LOCK(pnode->cs_vSend);
        // Check if the transport still has unsent bytes, and indicate to it that we're about to
        // give it a message to send.
        const auto& [to_send, more, _msg_type] =
            pnode->m_transport->GetBytesToSend(/*have_next_message=*/true);
        const bool queue_was_empty{to_send.empty() && pnode->vSendMsg.empty()};

        // Update memory usage of send buffer; pause further sends once the
        // buffered data (queue plus transport-internal) exceeds the limit.
        pnode->m_send_memusage += msg.GetMemoryUsage();
        if (pnode->m_send_memusage + pnode->m_transport->GetSendMemoryUsage() > nSendBufferMaxSize) pnode->fPauseSend = true;

        // Move message to vSendMsg queue.
        pnode->vSendMsg.push_back(std::move(msg));

        // If there was nothing to send before, and there is now (predicted by the "more" value
        // returned by the GetBytesToSend call above), attempt "optimistic write":
        // because the poll/select loop may pause for SELECT_TIMEOUT_MILLISECONDS before actually
        // doing a send, try sending from the calling thread if the queue was empty before.
        // With a V1Transport, more will always be true here, because adding a message always
        // results in sendable bytes there, but with V2Transport this is not the case (it may
        // still be in the handshake).
        if (queue_was_empty && more) {
            std::tie(nBytesSent, std::ignore) = SocketSendData(*pnode);
        }
    }
    if (nBytesSent) RecordBytesSent(nBytesSent);
}
2016-04-16 19:13:12 -04:00
bool CConnman : : ForNode ( NodeId id , std : : function < bool ( CNode * pnode ) > func )
{
CNode * found = nullptr ;
2021-08-28 20:57:52 +02:00
LOCK ( m_nodes_mutex ) ;
for ( auto & & pnode : m_nodes ) {
2017-04-11 12:13:55 -04:00
if ( pnode - > GetId ( ) = = id ) {
2016-04-16 19:13:12 -04:00
found = pnode ;
break ;
}
}
2017-01-20 20:34:57 -05:00
return found ! = nullptr & & NodeFullyConnected ( found ) & & func ( found ) ;
2016-04-16 19:13:12 -04:00
}
2017-01-24 02:32:52 +01:00
CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const
{
    // Seed a SipHasher with this connman's per-instance secret keys and
    // mix in the caller-supplied domain-separation id.
    CSipHasher hasher{nSeed0, nSeed1};
    hasher.Write(id);
    return hasher;
}
2016-05-25 15:38:32 +02:00
2021-09-01 12:12:52 +01:00
uint64_t CConnman : : CalculateKeyedNetGroup ( const CAddress & address ) const
2016-09-09 12:48:10 +02:00
{
2021-09-01 12:12:52 +01:00
std : : vector < unsigned char > vchNetGroup ( m_netgroupman . GetGroup ( address ) ) ;
2016-05-25 15:38:32 +02:00
2023-07-17 03:33:13 +02:00
return GetDeterministicRandomizer ( RANDOMIZER_ID_NETGROUP ) . Write ( vchNetGroup ) . Finalize ( ) ;
2016-05-25 15:38:32 +02:00
}
2020-07-13 13:20:47 -04:00
2023-08-22 20:42:24 -04:00
// Drain m_reconnections, retrying each queued connection once.
// Called without either mutex held: m_reconnections_mutex is taken below,
// and OpenNetworkConnection may need m_unused_i2p_sessions_mutex.
void CConnman::PerformReconnections()
{
    AssertLockNotHeld(m_reconnections_mutex);
    AssertLockNotHeld(m_unused_i2p_sessions_mutex);
    while (true) {
        // Move first element of m_reconnections to todo (avoiding an allocation inside the lock).
        decltype(m_reconnections) todo;
        {
            LOCK(m_reconnections_mutex);
            if (m_reconnections.empty()) break;
            // splice transfers the node without copying the element.
            todo.splice(todo.end(), m_reconnections, m_reconnections.begin());
        }
        // The connection attempt below runs without m_reconnections_mutex held.
        auto& item = *todo.begin();
        OpenNetworkConnection(item.addr_connect,
                              // We only reconnect if the first attempt to connect succeeded at
                              // connection time, but then failed after the CNode object was
                              // created. Since we already know connecting is possible, do not
                              // count failure to reconnect.
                              /*fCountFailure=*/false,
                              std::move(item.grant),
                              item.destination.empty() ? nullptr : item.destination.c_str(),
                              item.conn_type,
                              item.use_v2transport);
    }
}
2023-07-13 14:30:30 -06:00
// Dump binary message to file, with timestamp.
static void CaptureMessageToFile ( const CAddress & addr ,
const std : : string & msg_type ,
Span < const unsigned char > data ,
bool is_incoming )
2020-07-13 13:20:47 -04:00
{
// Note: This function captures the message at the time of processing,
// not at socket receive/send time.
// This ensures that the messages are always in order from an application
// layer (processing) perspective.
auto now = GetTime < std : : chrono : : microseconds > ( ) ;
2022-02-17 12:54:39 +01:00
// Windows folder names cannot include a colon
2022-07-15 14:13:39 +02:00
std : : string clean_addr = addr . ToStringAddrPort ( ) ;
2020-07-13 13:20:47 -04:00
std : : replace ( clean_addr . begin ( ) , clean_addr . end ( ) , ' : ' , ' _ ' ) ;
2022-03-03 14:40:18 -05:00
fs : : path base_path = gArgs . GetDataDirNet ( ) / " message_capture " / fs : : u8path ( clean_addr ) ;
2020-07-13 13:20:47 -04:00
fs : : create_directories ( base_path ) ;
fs : : path path = base_path / ( is_incoming ? " msgs_recv.dat " : " msgs_sent.dat " ) ;
2022-06-06 17:22:59 +02:00
AutoFile f { fsbridge : : fopen ( path , " ab " ) } ;
2020-07-13 13:20:47 -04:00
ser_writedata64 ( f , now . count ( ) ) ;
2023-06-22 17:02:28 +02:00
f < < Span { msg_type } ;
2020-07-13 13:20:47 -04:00
for ( auto i = msg_type . length ( ) ; i < CMessageHeader : : COMMAND_SIZE ; + + i ) {
2021-05-31 14:57:32 +02:00
f < < uint8_t { ' \0 ' } ;
2020-07-13 13:20:47 -04:00
}
uint32_t size = data . size ( ) ;
ser_writedata32 ( f , size ) ;
2023-06-22 17:02:28 +02:00
f < < data ;
2020-07-13 13:20:47 -04:00
}
2021-07-22 18:23:21 +02:00
// Global hook through which peer messages are captured; initialized to
// CaptureMessageToFile, which writes them to per-peer capture files.
// NOTE(review): being a mutable std::function, this looks intended to be
// swappable (e.g. by tests) — confirm against callers.
std::function<void(const CAddress& addr,
                   const std::string& msg_type,
                   Span<const unsigned char> data,
                   bool is_incoming)>
    CaptureMessage = CaptureMessageToFile;