This repository has been archived by the owner on Oct 28, 2021. It is now read-only.

Various network-related logging changes #5605

Merged: 20 commits, May 24, 2019
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -14,6 +14,7 @@
- Changed: [#5576](https://github.com/ethereum/aleth/pull/5576) Moved sstore_combinations and static_Call50000_sha256 tests to stTimeConsuming test suite. (testeth runs them only with `--all` flag)
- Changed: [#5589](https://github.com/ethereum/aleth/pull/5589) Make aleth output always line-buffered even when redirected to file or pipe.
- Changed: [#5602](https://github.com/ethereum/aleth/pull/5602) Better predicting external IP address and UDP port.
- Changed: [#5605](https://github.com/ethereum/aleth/pull/5605) Network logging bugfixes and improvements and add warpcap log channel.
- Fixed: [#5562](https://github.com/ethereum/aleth/pull/5562) Don't send header request messages to peers that haven't sent us Status yet.
- Fixed: [#5581](https://github.com/ethereum/aleth/pull/5581) Fixed finding neighbour nodes in Discovery.
- Fixed: [#5599](https://github.com/ethereum/aleth/pull/5600) Prevent aleth from attempting concurrent connection to node which results in disconnect of original connection.
2 changes: 1 addition & 1 deletion aleth/main.cpp
@@ -323,7 +323,7 @@ int main(int argc, char** argv)

std::string const logChannels =
"block blockhdr bq chain client debug discov error ethcap exec host impolite info net "
"overlaydb p2pcap peer rlpx rpc snap statedb sync timer tq trace vmtrace warn watch";
"overlaydb p2pcap peer rlpx rpc snap statedb sync timer tq trace vmtrace warn warpcap watch";
LoggingOptions loggingOptions;
po::options_description loggingProgramOptions(
createLoggingProgramOptions(c_lineWidth, loggingOptions, logChannels));
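The change above registers the new `warpcap` channel in the list of log channels exposed through aleth's logging options. As a rough sketch (the class and message below are hypothetical; only the channel name and the `Logger`/`createLogger`/`LOG` facilities come from this PR), a warp-sync capability could log on that channel like this:

```cpp
#include <libdevcore/Log.h>

namespace dev
{
// Hypothetical capability class; only the "warpcap" channel name and the
// logging helpers are taken from this PR.
class WarpCapabilityExample
{
public:
    void noteStatusReceived()
    {
        LOG(m_logger) << "Received warp Status from peer";
    }

private:
    Logger m_logger{createLogger(VerbosityDebug, "warpcap")};
};
}  // namespace dev
```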
33 changes: 10 additions & 23 deletions libdevcore/Log.cpp
@@ -1,23 +1,6 @@
/*
This file is part of cpp-ethereum.

cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file Log.cpp
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
/// Aleth: Ethereum C++ client, tools and libraries.
// Copyright 2019 Aleth Authors.
// Licensed under the GNU General Public License, Version 3.

#include "Log.h"

@@ -48,7 +31,8 @@ using log_sink = boost::log::sinks::synchronous_sink<T>;
namespace dev
{
BOOST_LOG_ATTRIBUTE_KEYWORD(channel, "Channel", std::string)
BOOST_LOG_ATTRIBUTE_KEYWORD(context, "Context", std::string)
BOOST_LOG_ATTRIBUTE_KEYWORD(prefix, "Prefix", std::string)
BOOST_LOG_ATTRIBUTE_KEYWORD(suffix, "Suffix", std::string)
BOOST_LOG_ATTRIBUTE_KEYWORD(severity, "Severity", int)
BOOST_LOG_ATTRIBUTE_KEYWORD(threadName, "ThreadName", std::string)
BOOST_LOG_ATTRIBUTE_KEYWORD(timestamp, "TimeStamp", boost::posix_time::ptime)
@@ -96,10 +80,13 @@ void formatter(boost::log::record_view const& _rec, boost::log::formatting_ostream& _strm)

_strm << EthNavy << std::setw(4) << std::left << _rec[threadName] << EthReset " ";
_strm << std::setw(6) << std::left << _rec[channel] << " ";
if (boost::log::expressions::has_attr(context)(_rec))
_strm << EthNavy << _rec[context] << EthReset " ";
if (boost::log::expressions::has_attr(prefix)(_rec))
_strm << EthNavy << _rec[prefix] << EthReset " ";

_strm << _rec[boost::log::expressions::smessage];

if (boost::log::expressions::has_attr(suffix)(_rec))
_strm << " " EthNavy << _rec[suffix] << EthReset;
}

} // namespace
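The formatter above now looks for `Prefix` and `Suffix` record attributes instead of the old `Context` one. A minimal sketch of how such attributes might be attached to a Boost.Log logger (the helper and attribute values below are assumptions for illustration, not aleth's actual call sites):

```cpp
#include <string>

#include <boost/log/attributes/constant.hpp>
#include <boost/log/sources/severity_channel_logger.hpp>

using ExampleLogger = boost::log::sources::severity_channel_logger<>;

// Attach peer details so the formatter prints them before ("Prefix") and
// after ("Suffix") every message emitted through this logger.
void decoratePeerLogger(
    ExampleLogger& _logger, std::string const& _peerShortId, std::string const& _enodeUrl)
{
    _logger.add_attribute(
        "Prefix", boost::log::attributes::constant<std::string>(_peerShortId));
    _logger.add_attribute(
        "Suffix", boost::log::attributes::constant<std::string>(_enodeUrl));
}
```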
24 changes: 3 additions & 21 deletions libdevcore/Log.h
@@ -1,19 +1,6 @@
/*
This file is part of cpp-ethereum.

cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/// Aleth: Ethereum C++ client, tools and libraries.
// Copyright 2019 Aleth Authors.
// Licensed under the GNU General Public License, Version 3.

#pragma once

@@ -128,11 +115,6 @@ inline Logger createLogger(int _severity, std::string const& _channel)
boost::log::keywords::severity = _severity, boost::log::keywords::channel = _channel);
}

// Adds the context string to all log messages in the scope
#define LOG_SCOPED_CONTEXT(context) \
BOOST_LOG_SCOPED_THREAD_ATTR("Context", boost::log::attributes::constant<std::string>(context));


// Below overloads for both const and non-const references are needed, because without overload for
// non-const reference generic operator<<(formatting_ostream& _strm, T& _value) will be preferred by
// overload resolution.
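With the `LOG_SCOPED_CONTEXT` macro removed above, scope-wide decoration can still be achieved directly with the Boost facility the macro wrapped, now targeting the new `Prefix` attribute. A sketch under that assumption (not an API added by this PR; the value is illustrative):

```cpp
#include <string>

#include <boost/log/attributes/constant.hpp>
#include <boost/log/attributes/scoped_attribute.hpp>

void handlePeerTask()
{
    // Every log record emitted on this thread within this scope carries a
    // "Prefix" attribute, which the formatter in Log.cpp prints before the
    // message body.
    BOOST_LOG_SCOPED_THREAD_ATTR(
        "Prefix", boost::log::attributes::constant<std::string>("peer-1234"));

    // ... calls to LOG(...) go here ...
}
```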
83 changes: 48 additions & 35 deletions libethereum/BlockChainSync.cpp
@@ -196,7 +196,7 @@ void BlockChainSync::onPeerStatus(EthereumPeer const& _peer)

if (!disconnectReason.empty())
{
LOG(m_logger) << "Peer not suitable for sync: " << disconnectReason;
LOG(m_logger) << "Peer " << _peer.id() << " not suitable for sync: " << disconnectReason;
m_host.capabilityHost().disconnect(_peer.id(), p2p::UserReason);
return;
}
@@ -255,13 +255,15 @@ void BlockChainSync::syncPeer(NodeID const& _peerID, bool _force)
if (_force || peerTotalDifficulty > syncingDifficulty)
{
if (peerTotalDifficulty > syncingDifficulty)
LOG(m_logger) << "Discovered new highest difficulty";
LOG(m_logger) << "Discovered new highest difficulty (" << peerTotalDifficulty
<< ") from peer " << peer.id();

// start sync
m_syncingTotalDifficulty = peerTotalDifficulty;
if (m_state == SyncState::Idle || m_state == SyncState::NotSynced)
{
LOG(m_loggerInfo) << "Starting full sync";
LOG(m_logger) << "Syncing with peer " << peer.id();
m_state = SyncState::Blocks;
}
peer.requestBlockHeaders(peer.latestHash(), 1, 0, false);
@@ -289,9 +291,8 @@ void BlockChainSync::requestBlocks(NodeID const& _peerID)
clearPeerDownload(_peerID);
if (host().bq().knownFull())
{
LOG(m_loggerDetail)
<< "Waiting for block queue before downloading blocks. Block queue status: "
<< host().bq().status();
LOG(m_loggerDetail) << "Waiting for block queue before downloading blocks from " << _peerID
<< ". Block queue status: " << host().bq().status();
pauseSync();
return;
}
@@ -379,7 +380,7 @@ void BlockChainSync::requestBlocks(NodeID const& _peerID)
}
}
else
m_host.peer(_peerID).requestBlockHeaders(start, 1, 0, false);
m_host.peer(_peerID).requestBlockHeaders(start, 1 /* count */, 0 /* skip */, false);
}
}

@@ -446,7 +447,7 @@ void BlockChainSync::onPeerBlockHeaders(NodeID const& _peerID, RLP const& _r)
DEV_INVARIANT_CHECK;
size_t itemCount = _r.itemCount();
LOG(m_logger) << "BlocksHeaders (" << dec << itemCount << " entries) "
<< (itemCount ? "" : ": NoMoreHeaders");
<< (itemCount ? "" : ": NoMoreHeaders") << " from " << _peerID;

if (m_daoChallengedPeers.find(_peerID) != m_daoChallengedPeers.end())
{
@@ -462,17 +463,17 @@ void BlockChainSync::onPeerBlockHeaders(NodeID const& _peerID, RLP const& _r)
clearPeerDownload(_peerID);
if (m_state != SyncState::Blocks && m_state != SyncState::Waiting)
{
LOG(m_logger) << "Ignoring unexpected blocks";
LOG(m_logger) << "Ignoring unexpected blocks from " << _peerID;
return;
}
if (m_state == SyncState::Waiting)
{
LOG(m_loggerDetail) << "Ignored blocks while waiting";
LOG(m_loggerDetail) << "Ignored blocks from " << _peerID << " while waiting";
return;
}
if (itemCount == 0)
{
LOG(m_loggerDetail) << "Peer does not have the blocks requested";
LOG(m_loggerDetail) << "Peer " << _peerID << " does not have the blocks requested";
m_host.capabilityHost().updateRating(_peerID, -1);
}
for (unsigned i = 0; i < itemCount; i++)
@@ -481,17 +482,19 @@ void BlockChainSync::onPeerBlockHeaders(NodeID const& _peerID, RLP const& _r)
unsigned blockNumber = static_cast<unsigned>(info.number());
if (blockNumber < m_chainStartBlock)
{
LOG(m_logger) << "Skipping too old header " << blockNumber;
LOG(m_logger) << "Skipping too old header " << blockNumber << " from " << _peerID;
continue;
}
if (haveItem(m_headers, blockNumber))
{
LOG(m_logger) << "Skipping header " << blockNumber << " (already downloaded)";
LOG(m_logger) << "Skipping header " << blockNumber << " (already downloaded) from "
<< _peerID;
continue;
}
if (blockNumber <= m_lastImportedBlock && m_haveCommonHeader)
{
LOG(m_logger) << "Skipping header " << blockNumber << " (already imported)";
LOG(m_logger) << "Skipping header " << blockNumber << " (already imported) from "
<< _peerID;
continue;
}
if (blockNumber > m_highestBlock)
@@ -509,8 +512,8 @@ void BlockChainSync::onPeerBlockHeaders(NodeID const& _peerID, RLP const& _r)
{
// Start of the header chain in m_headers doesn't match our known chain,
// probably we've downloaded other fork
clog(VerbosityWarning, "sync")
<< "Unknown parent of the downloaded headers, restarting sync";
LOG(m_loggerWarning)
<< "Unknown parent of the downloaded headers, restarting sync with " << _peerID;
restartSync();
return;
}
@@ -527,8 +530,9 @@ void BlockChainSync::onPeerBlockHeaders(NodeID const& _peerID, RLP const& _r)
if ((prevBlock && prevBlock->hash != info.parentHash()) || (blockNumber == m_lastImportedBlock + 1 && info.parentHash() != m_lastImportedBlockHash))
{
// mismatching parent id, delete the previous block and don't add this one
clog(VerbosityWarning, "sync") << "Unknown block header " << blockNumber << " "
<< info.hash() << " (Restart syncing)";
LOG(m_loggerWarning)
<< "Unknown block header " << blockNumber << " " << info.hash()
<< " (Restart syncing with " << _peerID << ")";
m_host.capabilityHost().updateRating(_peerID, -1);
restartSync();
return ;
@@ -537,8 +541,8 @@ void BlockChainSync::onPeerBlockHeaders(NodeID const& _peerID, RLP const& _r)
Header const* nextBlock = findItem(m_headers, blockNumber + 1);
if (nextBlock && nextBlock->parent != info.hash())
{
LOG(m_loggerDetail)
<< "Unknown block header " << blockNumber + 1 << " " << nextBlock->hash;
LOG(m_loggerDetail) << "Unknown block header " << blockNumber + 1 << " "
<< nextBlock->hash << " from " << _peerID;
// clear following headers
unsigned n = blockNumber + 1;
auto headers = m_headers.at(n);
@@ -590,20 +594,20 @@ void BlockChainSync::onPeerBlockBodies(NodeID const& _peerID, RLP const& _r)
DEV_INVARIANT_CHECK;
size_t itemCount = _r.itemCount();
LOG(m_logger) << "BlocksBodies (" << dec << itemCount << " entries) "
<< (itemCount ? "" : ": NoMoreBodies");
<< (itemCount ? "" : ": NoMoreBodies") << " from " << _peerID;
clearPeerDownload(_peerID);
if (m_state != SyncState::Blocks && m_state != SyncState::Waiting) {
LOG(m_logger) << "Ignoring unexpected blocks";
LOG(m_logger) << "Ignoring unexpected blocks from " << _peerID;
return;
}
if (m_state == SyncState::Waiting)
{
LOG(m_loggerDetail) << "Ignored blocks while waiting";
LOG(m_loggerDetail) << "Ignored blocks from " << _peerID << " while waiting";
return;
}
if (itemCount == 0)
{
LOG(m_loggerDetail) << "Peer does not have the blocks requested";
LOG(m_loggerDetail) << "Peer " << _peerID << " does not have the blocks requested";
m_host.capabilityHost().updateRating(_peerID, -1);
}
for (unsigned i = 0; i < itemCount; i++)
@@ -617,13 +621,14 @@ void BlockChainSync::onPeerBlockBodies(NodeID const& _peerID, RLP const& _r)
auto iter = m_headerIdToNumber.find(id);
if (iter == m_headerIdToNumber.end() || !haveItem(m_headers, iter->second))
{
LOG(m_loggerDetail) << "Ignored unknown block body";
LOG(m_loggerDetail) << "Ignored unknown block body from " << _peerID;
continue;
}
unsigned blockNumber = iter->second;
if (haveItem(m_bodies, blockNumber))
{
LOG(m_logger) << "Skipping already downloaded block body " << blockNumber;
LOG(m_logger) << "Skipping already downloaded block body " << blockNumber << " from "
<< _peerID;
continue;
}
m_headerIdToNumber.erase(id);
@@ -705,7 +710,7 @@ void BlockChainSync::collectBlocks()

if (host().bq().unknownFull())
{
clog(VerbosityWarning, "sync") << "Too many unknown blocks, restarting sync";
LOG(m_loggerWarning) << "Too many unknown blocks, restarting sync";
restartSync();
return;
}
@@ -755,7 +760,8 @@ void BlockChainSync::onPeerNewBlock(NodeID const& _peerID, RLP const& _r)
unsigned blockNumber = static_cast<unsigned>(info.number());
if (blockNumber > (m_lastImportedBlock + 1))
{
LOG(m_loggerDetail) << "Received unknown new block";
LOG(m_loggerDetail) << "Received unknown new block (" << blockNumber << ") from "
<< _peerID;
// Update the hash of highest known block of the peer.
// syncPeer will then request the highest block header to properly restart syncing
peer.setLatestHash(h);
@@ -781,8 +787,9 @@ void BlockChainSync::onPeerNewBlock(NodeID const& _peerID, RLP const& _r)
{
if (!m_bodies.empty())
{
LOG(m_loggerDetail)
<< "Block headers map is empty, but block bodies map is not. Force-clearing.";
LOG(m_loggerDetail) << "Block headers map is empty, but block bodies map is not. "
"Force-clearing (peer: "
<< _peerID << ")";
m_bodies.clear();
}
completeSync();
@@ -815,7 +822,9 @@ void BlockChainSync::onPeerNewBlock(NodeID const& _peerID, RLP const& _r)
u256 totalDifficulty = _r[1].toInt<u256>();
if (totalDifficulty > peer.totalDifficulty())
{
LOG(m_loggerDetail) << "Received block with no known parent. Peer needs syncing...";
LOG(m_loggerDetail) << "Received block (" << blockNumber
<< ") with no known parent. Peer " << _peerID
<< " needs syncing...";
syncPeer(_peerID, true);
}
break;
@@ -882,10 +891,12 @@ void BlockChainSync::onPeerNewHashes(
auto& peer = m_host.peer(_peerID);
if (peer.isConversing())
{
LOG(m_loggerDetail) << "Ignoring new hashes since we're already downloading.";
LOG(m_loggerDetail) << "Ignoring new hashes since we're already downloading from peer "
<< _peerID;
return;
}
LOG(m_loggerDetail) << "Not syncing and new block hash discovered: syncing.";
LOG(m_loggerDetail) << "Not syncing and new block hash discovered: syncing with peer "
<< _peerID;
unsigned knowns = 0;
unsigned unknowns = 0;
unsigned maxHeight = 0;
@@ -899,7 +910,8 @@ void BlockChainSync::onPeerNewHashes(
knowns++;
else if (status == QueueStatus::Bad)
{
cwarn << "block hash bad!" << h << ". Bailing...";
LOG(m_loggerWarning) << "block hash bad!" << h << ". Bailing... (peer: " << _peerID
<< ")";
return;
}
else if (status == QueueStatus::Unknown)
@@ -914,10 +926,11 @@ void BlockChainSync::onPeerNewHashes(
else
knowns++;
}
LOG(m_logger) << knowns << " knowns, " << unknowns << " unknowns";
LOG(m_logger) << knowns << " knowns, " << unknowns << " unknowns (peer: " << _peerID << ")";
if (unknowns > 0)
{
LOG(m_loggerDetail) << "Not syncing and new block hash discovered: syncing.";
LOG(m_loggerDetail) << "Not syncing and new block hash discovered: start syncing with "
<< _peerID;
syncPeer(_peerID, true);
}
}
1 change: 1 addition & 0 deletions libethereum/BlockChainSync.h
@@ -150,6 +150,7 @@ class BlockChainSync final: public HasInvariants
Logger m_logger{createLogger(VerbosityDebug, "sync")};
Logger m_loggerInfo{createLogger(VerbosityInfo, "sync")};
Logger m_loggerDetail{createLogger(VerbosityTrace, "sync")};
Logger m_loggerWarning{createLogger(VerbosityWarning, "sync")};

private:
static char const* const s_stateNames[static_cast<int>(SyncState::Size)];
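Taken together with the `BlockChainSync.cpp` changes earlier in this diff, the pattern is: warnings go through the new class-level `m_loggerWarning` declared above rather than ad-hoc `clog(VerbosityWarning, "sync")` calls at each site. A compact sketch of the resulting usage (the class body is illustrative; the logger declaration and message text mirror the diff):

```cpp
#include <libdevcore/Log.h>

namespace dev
{
class SyncLoggingExample
{
public:
    void onBadHeaders()
    {
        // Previously written as: clog(VerbosityWarning, "sync") << ...;
        LOG(m_loggerWarning) << "Unknown parent of the downloaded headers, restarting sync";
    }

private:
    Logger m_loggerWarning{createLogger(VerbosityWarning, "sync")};
};
}  // namespace dev
```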