mirror of
https://github.com/bitcoin/bitcoin.git
synced 2026-03-17 17:02:43 +00:00
Merge bitcoin/bitcoin#33960: log: Use more severe log level (warn/err) where appropriate
fa45a1503eee603059166071857215ea9bd7242a log: Use LogWarning for non-critical logs (MarcoFalke)
fa0018d01102ad1d358eee20d8bae1e438ceebf8 log: Use LogError for fatal errors (MarcoFalke)
22229de7288fed6369bc70b2af674906e6777ce4 doc: Fix typo in init log (MarcoFalke)
Pull request description:
Logging supports severity levels above info, which the legacy `LogPrintf` does not indicate. So use the more appropriate `LogError` or `LogWarning`, where it applies.
This has a few small benefits:
* It often makes it possible to remove the manual and literal "error: ", "Warning:", ... prefixes. Instead the uniform log level formatting is used.
* It is easier to grep for, or glance at, more severe logs, which indicate some kind of alert.
* `LogPrintf` didn't indicate any severity level, but it is an alias for `LogInfo`. So having the log level explicitly spelled out makes it easier to read the code.
* Also, remove the redundant trailing `\n` newline, while touching.
* Also, remove the `__func__` formatting in the log string, which is redundant with `-logsourcelocations`. Instead, use a unique log string for each location.
ACKs for top commit:
l0rinc:
Code review ACK fa45a1503eee603059166071857215ea9bd7242a
stickies-v:
ACK fa45a1503eee603059166071857215ea9bd7242a
rkrux:
crACK fa45a1503eee603059166071857215ea9bd7242a
Tree-SHA512: 516d439c36716f969c6e82d00bcda03c92c8765a9e41593b90052c86f8fa3a3dacbb2c3dc98bfc862cefa54cae34842b488671a20dd86cf1d15fb94aa5563406
This commit is contained in:
commit
ce771726f3
@ -1055,7 +1055,7 @@ void AddrManImpl::Check() const
|
||||
|
||||
const int err{CheckAddrman()};
|
||||
if (err) {
|
||||
LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err);
|
||||
LogError("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i", err);
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -113,7 +113,7 @@ std::optional<common::SettingsValue> InterpretValue(const KeyInfo& key, const st
|
||||
}
|
||||
// Double negatives like -nofoo=0 are supported (but discouraged)
|
||||
if (value && !InterpretBool(*value)) {
|
||||
LogPrintf("Warning: parsed potentially confusing double-negative -%s=%s\n", key.name, *value);
|
||||
LogWarning("Parsed potentially confusing double-negative -%s=%s", key.name, *value);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -398,7 +398,7 @@ static void SaveErrors(const std::vector<std::string> errors, std::vector<std::s
|
||||
if (error_out) {
|
||||
error_out->emplace_back(error);
|
||||
} else {
|
||||
LogPrintf("%s\n", error);
|
||||
LogWarning("%s", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -420,7 +420,7 @@ bool ArgsManager::ReadSettingsFile(std::vector<std::string>* errors)
|
||||
for (const auto& setting : m_settings.rw_settings) {
|
||||
KeyInfo key = InterpretKey(setting.first); // Split setting key into section and argname
|
||||
if (!GetArgFlags('-' + key.name)) {
|
||||
LogPrintf("Ignoring unknown rw_settings value %s\n", setting.first);
|
||||
LogWarning("Ignoring unknown rw_settings value %s", setting.first);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
|
||||
@ -84,7 +84,7 @@ bool IsConfSupported(KeyInfo& key, std::string& error) {
|
||||
if (key.name == "reindex") {
|
||||
// reindex can be set in a config file but it is strongly discouraged as this will cause the node to reindex on
|
||||
// every restart. Allow the config but throw a warning
|
||||
LogPrintf("Warning: reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary\n");
|
||||
LogWarning("reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary");
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
@ -109,7 +109,7 @@ bool ArgsManager::ReadConfigStream(std::istream& stream, const std::string& file
|
||||
m_settings.ro_config[key.section][key.name].push_back(*value);
|
||||
} else {
|
||||
if (ignore_invalid_keys) {
|
||||
LogPrintf("Ignoring unknown configuration value %s\n", option.first);
|
||||
LogWarning("Ignoring unknown configuration value %s", option.first);
|
||||
} else {
|
||||
error = strprintf("Invalid configuration value %s", option.first);
|
||||
return false;
|
||||
|
||||
@ -58,8 +58,9 @@ void runCommand(const std::string& strCommand)
|
||||
#else
|
||||
int nErr = ::_wsystem(std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().from_bytes(strCommand).c_str());
|
||||
#endif
|
||||
if (nErr)
|
||||
LogPrintf("runCommand error: system(%s) returned %d\n", strCommand, nErr);
|
||||
if (nErr) {
|
||||
LogWarning("runCommand error: system(%s) returned %d", strCommand, nErr);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@ -47,8 +47,8 @@ static void HandleError(const leveldb::Status& status)
|
||||
if (status.ok())
|
||||
return;
|
||||
const std::string errmsg = "Fatal LevelDB error: " + status.ToString();
|
||||
LogPrintf("%s\n", errmsg);
|
||||
LogPrintf("You can use -debug=leveldb to get more complete diagnostic messages\n");
|
||||
LogError("%s", errmsg);
|
||||
LogInfo("You can use -debug=leveldb to get more complete diagnostic messages");
|
||||
throw dbwrapper_error(errmsg);
|
||||
}
|
||||
|
||||
@ -309,7 +309,7 @@ std::optional<std::string> CDBWrapper::ReadImpl(std::span<const std::byte> key)
|
||||
if (!status.ok()) {
|
||||
if (status.IsNotFound())
|
||||
return std::nullopt;
|
||||
LogPrintf("LevelDB read failure: %s\n", status.ToString());
|
||||
LogError("LevelDB read failure: %s", status.ToString());
|
||||
HandleError(status);
|
||||
}
|
||||
return strValue;
|
||||
@ -324,7 +324,7 @@ bool CDBWrapper::ExistsImpl(std::span<const std::byte> key) const
|
||||
if (!status.ok()) {
|
||||
if (status.IsNotFound())
|
||||
return false;
|
||||
LogPrintf("LevelDB read failure: %s\n", status.ToString());
|
||||
LogError("LevelDB read failure: %s", status.ToString());
|
||||
HandleError(status);
|
||||
}
|
||||
return true;
|
||||
|
||||
@ -41,11 +41,11 @@ FILE* FlatFileSeq::Open(const FlatFilePos& pos, bool read_only) const
|
||||
if (!file && !read_only)
|
||||
file = fsbridge::fopen(path, "wb+");
|
||||
if (!file) {
|
||||
LogPrintf("Unable to open file %s\n", fs::PathToString(path));
|
||||
LogError("Unable to open file %s", fs::PathToString(path));
|
||||
return nullptr;
|
||||
}
|
||||
if (pos.nPos && fseek(file, pos.nPos, SEEK_SET)) {
|
||||
LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, fs::PathToString(path));
|
||||
LogError("Unable to seek to position %u of %s", pos.nPos, fs::PathToString(path));
|
||||
if (fclose(file) != 0) {
|
||||
LogError("Unable to close file %s", fs::PathToString(path));
|
||||
}
|
||||
|
||||
@ -120,7 +120,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req)
|
||||
jreq.context = context;
|
||||
jreq.peerAddr = req->GetPeer().ToStringAddrPort();
|
||||
if (!RPCAuthorized(authHeader.second, jreq.authUser)) {
|
||||
LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr);
|
||||
LogWarning("ThreadRPCServer incorrect password attempt from %s", jreq.peerAddr);
|
||||
|
||||
/* Deter brute-forcing
|
||||
If this results in a DoS the user really
|
||||
@ -144,7 +144,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req)
|
||||
UniValue reply;
|
||||
bool user_has_whitelist = g_rpc_whitelist.count(jreq.authUser);
|
||||
if (!user_has_whitelist && g_rpc_whitelist_default) {
|
||||
LogPrintf("RPC User %s not allowed to call any methods\n", jreq.authUser);
|
||||
LogWarning("RPC User %s not allowed to call any methods", jreq.authUser);
|
||||
req->WriteReply(HTTP_FORBIDDEN);
|
||||
return false;
|
||||
|
||||
@ -152,7 +152,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req)
|
||||
} else if (valRequest.isObject()) {
|
||||
jreq.parse(valRequest);
|
||||
if (user_has_whitelist && !g_rpc_whitelist[jreq.authUser].count(jreq.strMethod)) {
|
||||
LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, jreq.strMethod);
|
||||
LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, jreq.strMethod);
|
||||
req->WriteReply(HTTP_FORBIDDEN);
|
||||
return false;
|
||||
}
|
||||
@ -182,7 +182,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req)
|
||||
// Parse method
|
||||
std::string strMethod = request.find_value("method").get_str();
|
||||
if (!g_rpc_whitelist[jreq.authUser].count(strMethod)) {
|
||||
LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, strMethod);
|
||||
LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, strMethod);
|
||||
req->WriteReply(HTTP_FORBIDDEN);
|
||||
return false;
|
||||
}
|
||||
@ -297,7 +297,7 @@ static bool InitRPCAuthentication()
|
||||
fields.insert(fields.end(), salt_hmac.begin(), salt_hmac.end());
|
||||
g_rpcauth.push_back(fields);
|
||||
} else {
|
||||
LogPrintf("Invalid -rpcauth argument.\n");
|
||||
LogWarning("Invalid -rpcauth argument.");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -332,7 +332,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
|
||||
if (g_work_queue->Enqueue(item.get())) {
|
||||
item.release(); /* if true, queue took ownership */
|
||||
} else {
|
||||
LogPrintf("WARNING: request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting\n");
|
||||
LogWarning("Request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting");
|
||||
item->req->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Work queue depth exceeded");
|
||||
}
|
||||
} else {
|
||||
@ -372,10 +372,10 @@ static bool HTTPBindAddresses(struct evhttp* http)
|
||||
endpoints.emplace_back("::1", http_port);
|
||||
endpoints.emplace_back("127.0.0.1", http_port);
|
||||
if (!gArgs.GetArgs("-rpcallowip").empty()) {
|
||||
LogPrintf("WARNING: option -rpcallowip was specified without -rpcbind; this doesn't usually make sense\n");
|
||||
LogWarning("Option -rpcallowip was specified without -rpcbind; this doesn't usually make sense");
|
||||
}
|
||||
if (!gArgs.GetArgs("-rpcbind").empty()) {
|
||||
LogPrintf("WARNING: option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n");
|
||||
LogWarning("Option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect");
|
||||
}
|
||||
} else { // Specific bind addresses
|
||||
for (const std::string& strRPCBind : gArgs.GetArgs("-rpcbind")) {
|
||||
@ -396,7 +396,7 @@ static bool HTTPBindAddresses(struct evhttp* http)
|
||||
if (bind_handle) {
|
||||
const std::optional<CNetAddr> addr{LookupHost(i->first, false)};
|
||||
if (i->first.empty() || (addr.has_value() && addr->IsBindAny())) {
|
||||
LogPrintf("WARNING: the RPC server is not safe to expose to untrusted networks such as the public internet\n");
|
||||
LogWarning("The RPC server is not safe to expose to untrusted networks such as the public internet");
|
||||
}
|
||||
// Set the no-delay option (disable Nagle's algorithm) on the TCP socket.
|
||||
evutil_socket_t fd = evhttp_bound_socket_get_fd(bind_handle);
|
||||
@ -406,7 +406,7 @@ static bool HTTPBindAddresses(struct evhttp* http)
|
||||
}
|
||||
boundSockets.push_back(bind_handle);
|
||||
} else {
|
||||
LogPrintf("Binding RPC on address %s port %i failed.\n", i->first, i->second);
|
||||
LogWarning("Binding RPC on address %s port %i failed.", i->first, i->second);
|
||||
}
|
||||
}
|
||||
return !boundSockets.empty();
|
||||
@ -462,7 +462,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt)
|
||||
raii_evhttp http_ctr = obtain_evhttp(base_ctr.get());
|
||||
struct evhttp* http = http_ctr.get();
|
||||
if (!http) {
|
||||
LogPrintf("couldn't create evhttp. Exiting.\n");
|
||||
LogError("Couldn't create evhttp. Exiting.");
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -472,7 +472,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt)
|
||||
evhttp_set_gencb(http, http_request_cb, (void*)&interrupt);
|
||||
|
||||
if (!HTTPBindAddresses(http)) {
|
||||
LogPrintf("Unable to bind any endpoint for RPC server\n");
|
||||
LogError("Unable to bind any endpoint for RPC server");
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -602,7 +602,7 @@ HTTPRequest::~HTTPRequest()
|
||||
{
|
||||
if (!replySent) {
|
||||
// Keep track of whether reply was sent to avoid request leaks
|
||||
LogPrintf("%s: Unhandled request\n", __func__);
|
||||
LogWarning("Unhandled HTTP request");
|
||||
WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Unhandled request");
|
||||
}
|
||||
// evhttpd cleans up the request, as long as a reply was sent.
|
||||
|
||||
16
src/init.cpp
16
src/init.cpp
@ -198,7 +198,7 @@ static void RemovePidFile(const ArgsManager& args)
|
||||
const auto pid_path{GetPidFile(args)};
|
||||
if (std::error_code error; !fs::remove(pid_path, error)) {
|
||||
std::string msg{error ? error.message() : "File does not exist"};
|
||||
LogPrintf("Unable to remove PID file (%s): %s\n", fs::PathToString(pid_path), msg);
|
||||
LogWarning("Unable to remove PID file (%s): %s", fs::PathToString(pid_path), msg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1351,7 +1351,7 @@ static ChainstateLoadResult InitAndLoadChainstate(
|
||||
index->Interrupt();
|
||||
index->Stop();
|
||||
if (!(index->Init() && index->StartBackgroundSync())) {
|
||||
LogPrintf("[snapshot] WARNING failed to restart index %s on snapshot chain\n", index->GetName());
|
||||
LogWarning("[snapshot] Failed to restart index %s on snapshot chain", index->GetName());
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -1415,11 +1415,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
|
||||
|
||||
// Warn about relative -datadir path.
|
||||
if (args.IsArgSet("-datadir") && !args.GetPathArg("-datadir").is_absolute()) {
|
||||
LogPrintf("Warning: relative datadir option '%s' specified, which will be interpreted relative to the "
|
||||
"current working directory '%s'. This is fragile, because if bitcoin is started in the future "
|
||||
"from a different location, it will be unable to locate the current data files. There could "
|
||||
"also be data loss if bitcoin is started while in a temporary directory.\n",
|
||||
args.GetArg("-datadir", ""), fs::PathToString(fs::current_path()));
|
||||
LogWarning("Relative datadir option '%s' specified, which will be interpreted relative to the "
|
||||
"current working directory '%s'. This is fragile, because if bitcoin is started in the future "
|
||||
"from a different location, it will be unable to locate the current data files. There could "
|
||||
"also be data loss if bitcoin is started while in a temporary directory.",
|
||||
args.GetArg("-datadir", ""), fs::PathToString(fs::current_path()));
|
||||
}
|
||||
|
||||
assert(!node.scheduler);
|
||||
@ -1881,7 +1881,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
|
||||
} else {
|
||||
// Prior to setting NODE_NETWORK, check if we can provide historical blocks.
|
||||
if (!WITH_LOCK(chainman.GetMutex(), return chainman.BackgroundSyncInProgress())) {
|
||||
LogInfo("Setting NODE_NETWORK on non-prune mode");
|
||||
LogInfo("Setting NODE_NETWORK in non-prune mode");
|
||||
g_local_services = ServiceFlags(g_local_services | NODE_NETWORK);
|
||||
} else {
|
||||
LogInfo("Running node in NODE_NETWORK_LIMITED mode until snapshot background sync completes");
|
||||
|
||||
@ -116,7 +116,7 @@ int ProcessImpl::connect(const fs::path& data_dir,
|
||||
}
|
||||
int connect_error = errno;
|
||||
if (::close(fd) != 0) {
|
||||
LogPrintf("Error closing file descriptor %i '%s': %s\n", fd, address, SysErrorString(errno));
|
||||
LogWarning("Error closing file descriptor %i '%s': %s", fd, address, SysErrorString(errno));
|
||||
}
|
||||
throw std::system_error(connect_error, std::system_category());
|
||||
}
|
||||
@ -147,7 +147,7 @@ int ProcessImpl::bind(const fs::path& data_dir, const std::string& exe_name, std
|
||||
}
|
||||
int bind_error = errno;
|
||||
if (::close(fd) != 0) {
|
||||
LogPrintf("Error closing file descriptor %i: %s\n", fd, SysErrorString(errno));
|
||||
LogWarning("Error closing file descriptor %i: %s", fd, SysErrorString(errno));
|
||||
}
|
||||
throw std::system_error(bind_error, std::system_category());
|
||||
}
|
||||
|
||||
@ -533,7 +533,7 @@ void BCLog::Logger::ShrinkDebugFile()
|
||||
// Restart the file with some of the end
|
||||
std::vector<char> vch(RECENT_DEBUG_HISTORY_SIZE, 0);
|
||||
if (fseek(file, -((long)vch.size()), SEEK_END)) {
|
||||
LogPrintf("Failed to shrink debug log file: fseek(...) failed\n");
|
||||
LogWarning("Failed to shrink debug log file: fseek(...) failed");
|
||||
fclose(file);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -4949,13 +4949,13 @@ bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
|
||||
|
||||
if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
|
||||
// We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
|
||||
LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
|
||||
LogWarning("Not punishing noban peer %d!", peer.m_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pnode.IsManualConn()) {
|
||||
// We never disconnect or discourage manual peers for bad behavior
|
||||
LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
|
||||
LogWarning("Not punishing manually connected peer %d!", peer.m_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@ -103,7 +103,7 @@ enum Network ParseNetwork(const std::string& net_in) {
|
||||
if (net == "ipv6") return NET_IPV6;
|
||||
if (net == "onion") return NET_ONION;
|
||||
if (net == "tor") {
|
||||
LogPrintf("Warning: net name 'tor' is deprecated and will be removed in the future. You should use 'onion' instead.\n");
|
||||
LogWarning("Net name 'tor' is deprecated and will be removed in the future. You should use 'onion' instead.");
|
||||
return NET_ONION;
|
||||
}
|
||||
if (net == "i2p") {
|
||||
|
||||
@ -1263,7 +1263,7 @@ void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
|
||||
LogWarning("Could not open blocks file %s", fs::PathToString(path));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -151,7 +151,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
|
||||
}
|
||||
LogInfo("Setting nMinimumChainWork=%s", chainman.MinimumChainWork().GetHex());
|
||||
if (chainman.MinimumChainWork() < UintToArith256(chainman.GetConsensus().nMinimumChainWork)) {
|
||||
LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainman.GetConsensus().nMinimumChainWork.GetHex());
|
||||
LogWarning("nMinimumChainWork set below default value of %s", chainman.GetConsensus().nMinimumChainWork.GetHex());
|
||||
}
|
||||
if (chainman.m_blockman.GetPruneTarget() == BlockManager::PRUNE_TARGET_MANUAL) {
|
||||
LogInfo("Block pruning enabled. Use RPC call pruneblockchain(height) to manually prune block and undo files.");
|
||||
|
||||
@ -32,14 +32,14 @@ bool WriteSnapshotBaseBlockhash(Chainstate& snapshot_chainstate)
|
||||
FILE* file{fsbridge::fopen(write_to, "wb")};
|
||||
AutoFile afile{file};
|
||||
if (afile.IsNull()) {
|
||||
LogPrintf("[snapshot] failed to open base blockhash file for writing: %s\n",
|
||||
LogError("[snapshot] failed to open base blockhash file for writing: %s",
|
||||
fs::PathToString(write_to));
|
||||
return false;
|
||||
}
|
||||
afile << *snapshot_chainstate.m_from_snapshot_blockhash;
|
||||
|
||||
if (afile.fclose() != 0) {
|
||||
LogPrintf("[snapshot] failed to close base blockhash file %s after writing\n",
|
||||
LogError("[snapshot] failed to close base blockhash file %s after writing",
|
||||
fs::PathToString(write_to));
|
||||
return false;
|
||||
}
|
||||
@ -49,16 +49,16 @@ bool WriteSnapshotBaseBlockhash(Chainstate& snapshot_chainstate)
|
||||
std::optional<uint256> ReadSnapshotBaseBlockhash(fs::path chaindir)
|
||||
{
|
||||
if (!fs::exists(chaindir)) {
|
||||
LogPrintf("[snapshot] cannot read base blockhash: no chainstate dir "
|
||||
"exists at path %s\n", fs::PathToString(chaindir));
|
||||
LogWarning("[snapshot] cannot read base blockhash: no chainstate dir "
|
||||
"exists at path %s", fs::PathToString(chaindir));
|
||||
return std::nullopt;
|
||||
}
|
||||
const fs::path read_from = chaindir / node::SNAPSHOT_BLOCKHASH_FILENAME;
|
||||
const std::string read_from_str = fs::PathToString(read_from);
|
||||
|
||||
if (!fs::exists(read_from)) {
|
||||
LogPrintf("[snapshot] snapshot chainstate dir is malformed! no base blockhash file "
|
||||
"exists at path %s. Try deleting %s and calling loadtxoutset again?\n",
|
||||
LogWarning("[snapshot] snapshot chainstate dir is malformed! no base blockhash file "
|
||||
"exists at path %s. Try deleting %s and calling loadtxoutset again?",
|
||||
fs::PathToString(chaindir), read_from_str);
|
||||
return std::nullopt;
|
||||
}
|
||||
@ -67,7 +67,7 @@ std::optional<uint256> ReadSnapshotBaseBlockhash(fs::path chaindir)
|
||||
FILE* file{fsbridge::fopen(read_from, "rb")};
|
||||
AutoFile afile{file};
|
||||
if (afile.IsNull()) {
|
||||
LogPrintf("[snapshot] failed to open base blockhash file for reading: %s\n",
|
||||
LogWarning("[snapshot] failed to open base blockhash file for reading: %s",
|
||||
read_from_str);
|
||||
return std::nullopt;
|
||||
}
|
||||
@ -76,7 +76,7 @@ std::optional<uint256> ReadSnapshotBaseBlockhash(fs::path chaindir)
|
||||
int64_t position = afile.tell();
|
||||
afile.seek(0, SEEK_END);
|
||||
if (position != afile.tell()) {
|
||||
LogPrintf("[snapshot] warning: unexpected trailing data in %s\n", read_from_str);
|
||||
LogWarning("[snapshot] unexpected trailing data in %s", read_from_str);
|
||||
}
|
||||
return base_blockhash;
|
||||
}
|
||||
|
||||
@ -567,12 +567,12 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath
|
||||
|
||||
std::chrono::hours file_age = GetFeeEstimatorFileAge();
|
||||
if (file_age > MAX_FILE_AGE && !read_stale_estimates) {
|
||||
LogPrintf("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.\n", fs::PathToString(m_estimation_filepath), Ticks<std::chrono::hours>(file_age), Ticks<std::chrono::hours>(MAX_FILE_AGE));
|
||||
LogWarning("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.", fs::PathToString(m_estimation_filepath), Ticks<std::chrono::hours>(file_age), Ticks<std::chrono::hours>(MAX_FILE_AGE));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!Read(est_file)) {
|
||||
LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
|
||||
LogWarning("Failed to read fee estimates from %s. Continue anyway.", fs::PathToString(m_estimation_filepath));
|
||||
}
|
||||
}
|
||||
|
||||
@ -964,12 +964,12 @@ void CBlockPolicyEstimator::FlushFeeEstimates()
|
||||
{
|
||||
AutoFile est_file{fsbridge::fopen(m_estimation_filepath, "wb")};
|
||||
if (est_file.IsNull() || !Write(est_file)) {
|
||||
LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
|
||||
LogWarning("Failed to write fee estimates to %s. Continue anyway.", fs::PathToString(m_estimation_filepath));
|
||||
(void)est_file.fclose();
|
||||
return;
|
||||
}
|
||||
if (est_file.fclose() != 0) {
|
||||
LogError("Failed to close fee estimates file %s: %s. Continuing anyway.", fs::PathToString(m_estimation_filepath), SysErrorString(errno));
|
||||
LogWarning("Failed to close fee estimates file %s: %s. Continuing anyway.", fs::PathToString(m_estimation_filepath), SysErrorString(errno));
|
||||
return;
|
||||
}
|
||||
LogInfo("Flushed fee estimates to %s.", fs::PathToString(m_estimation_filepath.filename()));
|
||||
|
||||
22
src/sync.cpp
22
src/sync.cpp
@ -90,8 +90,8 @@ LockData& GetLockData() {
|
||||
|
||||
static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& s2)
|
||||
{
|
||||
LogPrintf("POTENTIAL DEADLOCK DETECTED\n");
|
||||
LogPrintf("Previous lock order was:\n");
|
||||
LogError("POTENTIAL DEADLOCK DETECTED");
|
||||
LogError("Previous lock order was:");
|
||||
for (const LockStackItem& i : s1) {
|
||||
std::string prefix{};
|
||||
if (i.first == mismatch.first) {
|
||||
@ -100,11 +100,11 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac
|
||||
if (i.first == mismatch.second) {
|
||||
prefix = " (2)";
|
||||
}
|
||||
LogPrintf("%s %s\n", prefix, i.second.ToString());
|
||||
LogError("%s %s", prefix, i.second.ToString());
|
||||
}
|
||||
|
||||
std::string mutex_a, mutex_b;
|
||||
LogPrintf("Current lock order is:\n");
|
||||
LogError("Current lock order is:");
|
||||
for (const LockStackItem& i : s2) {
|
||||
std::string prefix{};
|
||||
if (i.first == mismatch.first) {
|
||||
@ -115,7 +115,7 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac
|
||||
prefix = " (2)";
|
||||
mutex_b = i.second.Name();
|
||||
}
|
||||
LogPrintf("%s %s\n", prefix, i.second.ToString());
|
||||
LogError("%s %s", prefix, i.second.ToString());
|
||||
}
|
||||
if (g_debug_lockorder_abort) {
|
||||
tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString());
|
||||
@ -126,14 +126,14 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac
|
||||
|
||||
static void double_lock_detected(const void* mutex, const LockStack& lock_stack)
|
||||
{
|
||||
LogPrintf("DOUBLE LOCK DETECTED\n");
|
||||
LogPrintf("Lock order:\n");
|
||||
LogError("DOUBLE LOCK DETECTED");
|
||||
LogError("Lock order:");
|
||||
for (const LockStackItem& i : lock_stack) {
|
||||
std::string prefix{};
|
||||
if (i.first == mutex) {
|
||||
prefix = " (*)";
|
||||
}
|
||||
LogPrintf("%s %s\n", prefix, i.second.ToString());
|
||||
LogError("%s %s", prefix, i.second.ToString());
|
||||
}
|
||||
if (g_debug_lockorder_abort) {
|
||||
tfm::format(std::cerr,
|
||||
@ -223,10 +223,10 @@ void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, c
|
||||
}
|
||||
}
|
||||
|
||||
LogPrintf("INCONSISTENT LOCK ORDER DETECTED\n");
|
||||
LogPrintf("Current lock order (least recent first) is:\n");
|
||||
LogError("INCONSISTENT LOCK ORDER DETECTED");
|
||||
LogError("Current lock order (least recent first) is:");
|
||||
for (const LockStackItem& i : lock_stack) {
|
||||
LogPrintf(" %s\n", i.second.ToString());
|
||||
LogError(" %s", i.second.ToString());
|
||||
}
|
||||
if (g_debug_lockorder_abort) {
|
||||
tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname);
|
||||
|
||||
@ -125,7 +125,7 @@ void TorControlConnection::readcb(struct bufferevent *bev, void *ctx)
|
||||
// Do this after evbuffer_readln to make sure all full lines have been
|
||||
// removed from the buffer. Everything left is an incomplete line.
|
||||
if (evbuffer_get_length(input) > MAX_LINE_LENGTH) {
|
||||
LogPrintf("tor: Disconnecting because MAX_LINE_LENGTH exceeded\n");
|
||||
LogWarning("tor: Disconnecting because MAX_LINE_LENGTH exceeded");
|
||||
self->Disconnect();
|
||||
}
|
||||
}
|
||||
@ -155,14 +155,14 @@ bool TorControlConnection::Connect(const std::string& tor_control_center, const
|
||||
|
||||
const std::optional<CService> control_service{Lookup(tor_control_center, DEFAULT_TOR_CONTROL_PORT, fNameLookup)};
|
||||
if (!control_service.has_value()) {
|
||||
LogPrintf("tor: Failed to look up control center %s\n", tor_control_center);
|
||||
LogWarning("tor: Failed to look up control center %s", tor_control_center);
|
||||
return false;
|
||||
}
|
||||
|
||||
struct sockaddr_storage control_address;
|
||||
socklen_t control_address_len = sizeof(control_address);
|
||||
if (!control_service.value().GetSockAddr(reinterpret_cast<struct sockaddr*>(&control_address), &control_address_len)) {
|
||||
LogPrintf("tor: Error parsing socket address %s\n", tor_control_center);
|
||||
LogWarning("tor: Error parsing socket address %s", tor_control_center);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -178,7 +178,7 @@ bool TorControlConnection::Connect(const std::string& tor_control_center, const
|
||||
|
||||
// Finally, connect to tor_control_center
|
||||
if (bufferevent_socket_connect(b_conn, reinterpret_cast<struct sockaddr*>(&control_address), control_address_len) < 0) {
|
||||
LogPrintf("tor: Error connecting to address %s\n", tor_control_center);
|
||||
LogWarning("tor: Error connecting to address %s", tor_control_center);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@ -330,11 +330,11 @@ TorController::TorController(struct event_base* _base, const std::string& tor_co
|
||||
{
|
||||
reconnect_ev = event_new(base, -1, 0, reconnect_cb, this);
|
||||
if (!reconnect_ev)
|
||||
LogPrintf("tor: Failed to create event for reconnection: out of memory?\n");
|
||||
LogWarning("tor: Failed to create event for reconnection: out of memory?");
|
||||
// Start connection attempts immediately
|
||||
if (!conn.Connect(m_tor_control_center, std::bind(&TorController::connected_cb, this, std::placeholders::_1),
|
||||
std::bind(&TorController::disconnected_cb, this, std::placeholders::_1) )) {
|
||||
LogPrintf("tor: Initiating connection to Tor control port %s failed\n", m_tor_control_center);
|
||||
LogWarning("tor: Initiating connection to Tor control port %s failed", m_tor_control_center);
|
||||
}
|
||||
// Read service private key if cached
|
||||
std::pair<bool,std::string> pkf = ReadBinaryFile(GetPrivateKeyFile());
|
||||
@ -382,12 +382,12 @@ void TorController::get_socks_cb(TorControlConnection& _conn, const TorControlRe
|
||||
if (!socks_location.empty()) {
|
||||
LogDebug(BCLog::TOR, "Get SOCKS port command yielded %s\n", socks_location);
|
||||
} else {
|
||||
LogPrintf("tor: Get SOCKS port command returned nothing\n");
|
||||
LogWarning("tor: Get SOCKS port command returned nothing");
|
||||
}
|
||||
} else if (reply.code == TOR_REPLY_UNRECOGNIZED) {
|
||||
LogPrintf("tor: Get SOCKS port command failed with unrecognized command (You probably should upgrade Tor)\n");
|
||||
LogWarning("tor: Get SOCKS port command failed with unrecognized command (You probably should upgrade Tor)");
|
||||
} else {
|
||||
LogPrintf("tor: Get SOCKS port command failed; error code %d\n", reply.code);
|
||||
LogWarning("tor: Get SOCKS port command failed; error code %d", reply.code);
|
||||
}
|
||||
|
||||
CService resolved;
|
||||
@ -439,9 +439,9 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
|
||||
private_key = i->second;
|
||||
}
|
||||
if (service_id.empty()) {
|
||||
LogPrintf("tor: Error parsing ADD_ONION parameters:\n");
|
||||
LogWarning("tor: Error parsing ADD_ONION parameters:");
|
||||
for (const std::string &s : reply.lines) {
|
||||
LogPrintf(" %s\n", SanitizeString(s));
|
||||
LogWarning(" %s", SanitizeString(s));
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -450,14 +450,14 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
|
||||
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
|
||||
LogDebug(BCLog::TOR, "Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
|
||||
} else {
|
||||
LogPrintf("tor: Error writing service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
|
||||
LogWarning("tor: Error writing service private key to %s", fs::PathToString(GetPrivateKeyFile()));
|
||||
}
|
||||
AddLocal(service, LOCAL_MANUAL);
|
||||
// ... onion requested - keep connection open
|
||||
} else if (reply.code == TOR_REPLY_UNRECOGNIZED) {
|
||||
LogPrintf("tor: Add onion failed with unrecognized command (You probably need to upgrade Tor)\n");
|
||||
LogWarning("tor: Add onion failed with unrecognized command (You probably need to upgrade Tor)");
|
||||
} else {
|
||||
LogPrintf("tor: Add onion failed; error code %d\n", reply.code);
|
||||
LogWarning("tor: Add onion failed; error code %d", reply.code);
|
||||
}
|
||||
}
|
||||
|
||||
@ -481,7 +481,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply&
|
||||
_conn.Command(strprintf("ADD_ONION %s Port=%i,%s", private_key, Params().GetDefaultPort(), m_target.ToStringAddrPort()),
|
||||
std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2));
|
||||
} else {
|
||||
LogPrintf("tor: Authentication failed\n");
|
||||
LogWarning("tor: Authentication failed");
|
||||
}
|
||||
}
|
||||
|
||||
@ -520,30 +520,30 @@ void TorController::authchallenge_cb(TorControlConnection& _conn, const TorContr
|
||||
if (l.first == "AUTHCHALLENGE") {
|
||||
std::map<std::string,std::string> m = ParseTorReplyMapping(l.second);
|
||||
if (m.empty()) {
|
||||
LogPrintf("tor: Error parsing AUTHCHALLENGE parameters: %s\n", SanitizeString(l.second));
|
||||
LogWarning("tor: Error parsing AUTHCHALLENGE parameters: %s", SanitizeString(l.second));
|
||||
return;
|
||||
}
|
||||
std::vector<uint8_t> serverHash = ParseHex(m["SERVERHASH"]);
|
||||
std::vector<uint8_t> serverNonce = ParseHex(m["SERVERNONCE"]);
|
||||
LogDebug(BCLog::TOR, "AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce));
|
||||
if (serverNonce.size() != 32) {
|
||||
LogPrintf("tor: ServerNonce is not 32 bytes, as required by spec\n");
|
||||
LogWarning("tor: ServerNonce is not 32 bytes, as required by spec");
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<uint8_t> computedServerHash = ComputeResponse(TOR_SAFE_SERVERKEY, cookie, clientNonce, serverNonce);
|
||||
if (computedServerHash != serverHash) {
|
||||
LogPrintf("tor: ServerHash %s does not match expected ServerHash %s\n", HexStr(serverHash), HexStr(computedServerHash));
|
||||
LogWarning("tor: ServerHash %s does not match expected ServerHash %s", HexStr(serverHash), HexStr(computedServerHash));
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<uint8_t> computedClientHash = ComputeResponse(TOR_SAFE_CLIENTKEY, cookie, clientNonce, serverNonce);
|
||||
_conn.Command("AUTHENTICATE " + HexStr(computedClientHash), std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2));
|
||||
} else {
|
||||
LogPrintf("tor: Invalid reply to AUTHCHALLENGE\n");
|
||||
LogWarning("tor: Invalid reply to AUTHCHALLENGE");
|
||||
}
|
||||
} else {
|
||||
LogPrintf("tor: SAFECOOKIE authentication challenge failed\n");
|
||||
LogWarning("tor: SAFECOOKIE authentication challenge failed");
|
||||
}
|
||||
}
|
||||
|
||||
@ -591,7 +591,7 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
|
||||
ReplaceAll(torpassword, "\"", "\\\"");
|
||||
_conn.Command("AUTHENTICATE \"" + torpassword + "\"", std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2));
|
||||
} else {
|
||||
LogPrintf("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available\n");
|
||||
LogWarning("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available");
|
||||
}
|
||||
} else if (methods.count("NULL")) {
|
||||
LogDebug(BCLog::TOR, "Using NULL authentication\n");
|
||||
@ -608,18 +608,18 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
|
||||
_conn.Command("AUTHCHALLENGE SAFECOOKIE " + HexStr(clientNonce), std::bind(&TorController::authchallenge_cb, this, std::placeholders::_1, std::placeholders::_2));
|
||||
} else {
|
||||
if (status_cookie.first) {
|
||||
LogPrintf("tor: Authentication cookie %s is not exactly %i bytes, as is required by the spec\n", cookiefile, TOR_COOKIE_SIZE);
|
||||
LogWarning("tor: Authentication cookie %s is not exactly %i bytes, as is required by the spec", cookiefile, TOR_COOKIE_SIZE);
|
||||
} else {
|
||||
LogPrintf("tor: Authentication cookie %s could not be opened (check permissions)\n", cookiefile);
|
||||
LogWarning("tor: Authentication cookie %s could not be opened (check permissions)", cookiefile);
|
||||
}
|
||||
}
|
||||
} else if (methods.count("HASHEDPASSWORD")) {
|
||||
LogPrintf("tor: The only supported authentication mechanism left is password, but no password provided with -torpassword\n");
|
||||
LogWarning("tor: The only supported authentication mechanism left is password, but no password provided with -torpassword");
|
||||
} else {
|
||||
LogPrintf("tor: No supported authentication method\n");
|
||||
LogWarning("tor: No supported authentication method");
|
||||
}
|
||||
} else {
|
||||
LogPrintf("tor: Requesting protocol info failed\n");
|
||||
LogWarning("tor: Requesting protocol info failed");
|
||||
}
|
||||
}
|
||||
|
||||
@ -628,7 +628,7 @@ void TorController::connected_cb(TorControlConnection& _conn)
|
||||
reconnect_timeout = RECONNECT_TIMEOUT_START;
|
||||
// First send a PROTOCOLINFO command to figure out what authentication is expected
|
||||
if (!_conn.Command("PROTOCOLINFO 1", std::bind(&TorController::protocolinfo_cb, this, std::placeholders::_1, std::placeholders::_2)))
|
||||
LogPrintf("tor: Error sending initial protocolinfo command\n");
|
||||
LogWarning("tor: Error sending initial protocolinfo command");
|
||||
}
|
||||
|
||||
void TorController::disconnected_cb(TorControlConnection& _conn)
|
||||
@ -658,7 +658,7 @@ void TorController::Reconnect()
|
||||
*/
|
||||
if (!conn.Connect(m_tor_control_center, std::bind(&TorController::connected_cb, this, std::placeholders::_1),
|
||||
std::bind(&TorController::disconnected_cb, this, std::placeholders::_1) )) {
|
||||
LogPrintf("tor: Re-initiating connection to Tor control port %s failed\n", m_tor_control_center);
|
||||
LogWarning("tor: Re-initiating connection to Tor control port %s failed", m_tor_control_center);
|
||||
}
|
||||
}
|
||||
|
||||
@ -694,7 +694,7 @@ void StartTorControl(CService onion_service_target)
|
||||
#endif
|
||||
gBase = event_base_new();
|
||||
if (!gBase) {
|
||||
LogPrintf("tor: Unable to create event_base\n");
|
||||
LogWarning("tor: Unable to create event_base");
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@ -137,7 +137,7 @@ bool CCoinsViewDB::BatchWrite(CoinsViewCacheCursor& cursor, const uint256 &hashB
|
||||
if (m_options.simulate_crash_ratio) {
|
||||
static FastRandomContext rng;
|
||||
if (rng.randrange(m_options.simulate_crash_ratio) == 0) {
|
||||
LogPrintf("Simulating a crash. Goodbye.\n");
|
||||
LogError("Simulating a crash. Goodbye.");
|
||||
_Exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -20,7 +20,7 @@ void ScheduleBatchPriority()
|
||||
const static sched_param param{};
|
||||
const int rc = pthread_setschedparam(pthread_self(), SCHED_BATCH, ¶m);
|
||||
if (rc != 0) {
|
||||
LogPrintf("Failed to pthread_setschedparam: %s\n", SysErrorString(rc));
|
||||
LogWarning("Failed to pthread_setschedparam: %s", SysErrorString(rc));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -36,6 +36,6 @@ static std::string FormatException(const std::exception* pex, std::string_view t
|
||||
void PrintExceptionContinue(const std::exception* pex, std::string_view thread_name)
|
||||
{
|
||||
std::string message = FormatException(pex, thread_name);
|
||||
LogPrintf("\n\n************************\n%s\n", message);
|
||||
LogWarning("\n\n************************\n%s", message);
|
||||
tfm::format(std::cerr, "\n\n************************\n%s\n", message);
|
||||
}
|
||||
|
||||
@ -102,28 +102,28 @@ std::streampos GetFileSize(const char* path, std::streamsize max)
|
||||
bool FileCommit(FILE* file)
|
||||
{
|
||||
if (fflush(file) != 0) { // harmless if redundantly called
|
||||
LogPrintf("fflush failed: %s\n", SysErrorString(errno));
|
||||
LogError("fflush failed: %s", SysErrorString(errno));
|
||||
return false;
|
||||
}
|
||||
#ifdef WIN32
|
||||
HANDLE hFile = (HANDLE)_get_osfhandle(_fileno(file));
|
||||
if (FlushFileBuffers(hFile) == 0) {
|
||||
LogPrintf("FlushFileBuffers failed: %s\n", Win32ErrorString(GetLastError()));
|
||||
LogError("FlushFileBuffers failed: %s", Win32ErrorString(GetLastError()));
|
||||
return false;
|
||||
}
|
||||
#elif defined(__APPLE__) && defined(F_FULLFSYNC)
|
||||
if (fcntl(fileno(file), F_FULLFSYNC, 0) == -1) { // Manpage says "value other than -1" is returned on success
|
||||
LogPrintf("fcntl F_FULLFSYNC failed: %s\n", SysErrorString(errno));
|
||||
LogError("fcntl F_FULLFSYNC failed: %s", SysErrorString(errno));
|
||||
return false;
|
||||
}
|
||||
#elif HAVE_FDATASYNC
|
||||
if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync
|
||||
LogPrintf("fdatasync failed: %s\n", SysErrorString(errno));
|
||||
LogError("fdatasync failed: %s", SysErrorString(errno));
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
if (fsync(fileno(file)) != 0 && errno != EINVAL) {
|
||||
LogPrintf("fsync failed: %s\n", SysErrorString(errno));
|
||||
LogError("fsync failed: %s", SysErrorString(errno));
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
@ -235,7 +235,7 @@ fs::path GetSpecialFolderPath(int nFolder, bool fCreate)
|
||||
return fs::path(pszPath);
|
||||
}
|
||||
|
||||
LogPrintf("SHGetSpecialFolderPathW() failed, could not obtain requested path.\n");
|
||||
LogError("SHGetSpecialFolderPathW() failed, could not obtain requested path.");
|
||||
return fs::path("");
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -409,7 +409,7 @@ void Sock::Close()
|
||||
int ret = close(m_socket);
|
||||
#endif
|
||||
if (ret) {
|
||||
LogPrintf("Error closing socket %d: %s\n", m_socket, NetworkErrorString(WSAGetLastError()));
|
||||
LogWarning("Error closing socket %d: %s", m_socket, NetworkErrorString(WSAGetLastError()));
|
||||
}
|
||||
m_socket = INVALID_SOCKET;
|
||||
}
|
||||
|
||||
@ -1196,7 +1196,7 @@ bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws)
|
||||
script_verify_flags currentBlockScriptVerifyFlags{GetBlockScriptFlags(*m_active_chainstate.m_chain.Tip(), m_active_chainstate.m_chainman)};
|
||||
if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags,
|
||||
ws.m_precomputed_txdata, m_active_chainstate.CoinsTip(), GetValidationCache())) {
|
||||
LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s\n", hash.ToString(), state.ToString());
|
||||
LogError("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s", hash.ToString(), state.ToString());
|
||||
return Assume(false);
|
||||
}
|
||||
|
||||
@ -1966,7 +1966,7 @@ void Chainstate::CheckForkWarningConditions()
|
||||
}
|
||||
|
||||
if (m_chainman.m_best_invalid && m_chainman.m_best_invalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
|
||||
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
|
||||
LogWarning("Found invalid chain at least ~6 blocks longer than our best chain. Chain state database corruption likely.");
|
||||
m_chainman.GetNotifications().warningSet(
|
||||
kernel::Warning::LARGE_WORK_INVALID_CHAIN,
|
||||
_("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade."));
|
||||
@ -2862,7 +2862,7 @@ void Chainstate::ForceFlushStateToDisk()
|
||||
{
|
||||
BlockValidationState state;
|
||||
if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
|
||||
LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
|
||||
LogWarning("Failed to force flush state (%s)", state.ToString());
|
||||
}
|
||||
}
|
||||
|
||||
@ -2871,7 +2871,7 @@ void Chainstate::PruneAndFlush()
|
||||
BlockValidationState state;
|
||||
m_blockman.m_check_for_pruning = true;
|
||||
if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
|
||||
LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
|
||||
LogWarning("Failed to flush state (%s)", state.ToString());
|
||||
}
|
||||
}
|
||||
|
||||
@ -3378,8 +3378,8 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<
|
||||
// Belt-and-suspenders check that we aren't attempting to advance the background
|
||||
// chainstate past the snapshot base block.
|
||||
if (WITH_LOCK(::cs_main, return m_disabled)) {
|
||||
LogPrintf("m_disabled is set - this chainstate should not be in operation. "
|
||||
"Please report this as a bug. %s\n", CLIENT_BUGREPORT);
|
||||
LogError("m_disabled is set - this chainstate should not be in operation. "
|
||||
"Please report this as a bug. %s", CLIENT_BUGREPORT);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -4584,7 +4584,7 @@ void PruneBlockFilesManual(Chainstate& active_chainstate, int nManualPruneHeight
|
||||
BlockValidationState state;
|
||||
if (!active_chainstate.FlushStateToDisk(
|
||||
state, FlushStateMode::NONE, nManualPruneHeight)) {
|
||||
LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
|
||||
LogWarning("Failed to flush state after manual prune (%s)", state.ToString());
|
||||
}
|
||||
}
|
||||
|
||||
@ -4702,12 +4702,12 @@ VerifyDBResult CVerifyDB::VerifyDB(
|
||||
CBlock block;
|
||||
// check level 0: read from disk
|
||||
if (!chainstate.m_blockman.ReadBlock(block, *pindex)) {
|
||||
LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
LogError("Verification error: ReadBlock failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
return VerifyDBResult::CORRUPTED_BLOCK_DB;
|
||||
}
|
||||
// check level 1: verify block validity
|
||||
if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) {
|
||||
LogPrintf("Verification error: found bad block at %d, hash=%s (%s)\n",
|
||||
LogError("Verification error: found bad block at %d, hash=%s (%s)",
|
||||
pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
|
||||
return VerifyDBResult::CORRUPTED_BLOCK_DB;
|
||||
}
|
||||
@ -4716,7 +4716,7 @@ VerifyDBResult CVerifyDB::VerifyDB(
|
||||
CBlockUndo undo;
|
||||
if (!pindex->GetUndoPos().IsNull()) {
|
||||
if (!chainstate.m_blockman.ReadBlockUndo(undo, *pindex)) {
|
||||
LogPrintf("Verification error: found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
LogError("Verification error: found bad undo data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
return VerifyDBResult::CORRUPTED_BLOCK_DB;
|
||||
}
|
||||
}
|
||||
@ -4729,7 +4729,7 @@ VerifyDBResult CVerifyDB::VerifyDB(
|
||||
assert(coins.GetBestBlock() == pindex->GetBlockHash());
|
||||
DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
|
||||
if (res == DISCONNECT_FAILED) {
|
||||
LogPrintf("Verification error: irrecoverable inconsistency in block data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
LogError("Verification error: irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
return VerifyDBResult::CORRUPTED_BLOCK_DB;
|
||||
}
|
||||
if (res == DISCONNECT_UNCLEAN) {
|
||||
@ -4745,11 +4745,11 @@ VerifyDBResult CVerifyDB::VerifyDB(
|
||||
if (chainstate.m_chainman.m_interrupt) return VerifyDBResult::INTERRUPTED;
|
||||
}
|
||||
if (pindexFailure) {
|
||||
LogPrintf("Verification error: coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
|
||||
LogError("Verification error: coin database inconsistencies found (last %i blocks, %i good transactions before that)", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
|
||||
return VerifyDBResult::CORRUPTED_BLOCK_DB;
|
||||
}
|
||||
if (skipped_l3_checks) {
|
||||
LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n");
|
||||
LogWarning("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.");
|
||||
}
|
||||
|
||||
// store block count as we move pindex at check level >= 4
|
||||
@ -4768,11 +4768,11 @@ VerifyDBResult CVerifyDB::VerifyDB(
|
||||
pindex = chainstate.m_chain.Next(pindex);
|
||||
CBlock block;
|
||||
if (!chainstate.m_blockman.ReadBlock(block, *pindex)) {
|
||||
LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
LogError("Verification error: ReadBlock failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
|
||||
return VerifyDBResult::CORRUPTED_BLOCK_DB;
|
||||
}
|
||||
if (!chainstate.ConnectBlock(block, state, pindex, coins)) {
|
||||
LogPrintf("Verification error: found unconnectable block at %d, hash=%s (%s)\n", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
|
||||
LogError("Verification error: found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
|
||||
return VerifyDBResult::CORRUPTED_BLOCK_DB;
|
||||
}
|
||||
if (chainstate.m_chainman.m_interrupt) return VerifyDBResult::INTERRUPTED;
|
||||
@ -5618,7 +5618,7 @@ Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool)
|
||||
try {
|
||||
bool existed = fs::remove(base_blockhash_path);
|
||||
if (!existed) {
|
||||
LogPrintf("[snapshot] snapshot chainstate dir being removed lacks %s file\n",
|
||||
LogWarning("[snapshot] snapshot chainstate dir being removed lacks %s file",
|
||||
fs::PathToString(node::SNAPSHOT_BLOCKHASH_FILENAME));
|
||||
}
|
||||
} catch (const fs::filesystem_error& e) {
|
||||
@ -5635,7 +5635,7 @@ Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool)
|
||||
const bool destroyed = DestroyDB(path_str);
|
||||
|
||||
if (!destroyed) {
|
||||
LogPrintf("error: leveldb DestroyDB call failed on %s\n", path_str);
|
||||
LogError("leveldb DestroyDB call failed on %s", path_str);
|
||||
}
|
||||
|
||||
// Datadir should be removed from filesystem; otherwise initialization may detect
|
||||
@ -6096,8 +6096,8 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation()
|
||||
};
|
||||
|
||||
if (index_new.GetBlockHash() != snapshot_blockhash) {
|
||||
LogPrintf("[snapshot] supposed base block %s does not match the "
|
||||
"snapshot base block %s (height %d). Snapshot is not valid.\n",
|
||||
LogWarning("[snapshot] supposed base block %s does not match the "
|
||||
"snapshot base block %s (height %d). Snapshot is not valid.",
|
||||
index_new.ToString(), snapshot_blockhash.ToString(), snapshot_base_height);
|
||||
handle_invalid_snapshot();
|
||||
return SnapshotCompletionResult::BASE_BLOCKHASH_MISMATCH;
|
||||
@ -6117,8 +6117,8 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation()
|
||||
|
||||
const auto& maybe_au_data = m_options.chainparams.AssumeutxoForHeight(curr_height);
|
||||
if (!maybe_au_data) {
|
||||
LogPrintf("[snapshot] assumeutxo data not found for height "
|
||||
"(%d) - refusing to validate snapshot\n", curr_height);
|
||||
LogWarning("[snapshot] assumeutxo data not found for height "
|
||||
"(%d) - refusing to validate snapshot", curr_height);
|
||||
handle_invalid_snapshot();
|
||||
return SnapshotCompletionResult::MISSING_CHAINPARAMS;
|
||||
}
|
||||
@ -6139,7 +6139,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation()
|
||||
|
||||
// XXX note that this function is slow and will hold cs_main for potentially minutes.
|
||||
if (!maybe_ibd_stats) {
|
||||
LogPrintf("[snapshot] failed to generate stats for validation coins db\n");
|
||||
LogWarning("[snapshot] failed to generate stats for validation coins db");
|
||||
// While this isn't a problem with the snapshot per se, this condition
|
||||
// prevents us from validating the snapshot, so we should shut down and let the
|
||||
// user handle the issue manually.
|
||||
@ -6155,7 +6155,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation()
|
||||
// hash for the snapshot when it's loaded in its chainstate's leveldb. We could then
|
||||
// reference that here for an additional check.
|
||||
if (AssumeutxoHash{ibd_stats.hashSerialized} != au_data.hash_serialized) {
|
||||
LogPrintf("[snapshot] hash mismatch: actual=%s, expected=%s\n",
|
||||
LogWarning("[snapshot] hash mismatch: actual=%s, expected=%s",
|
||||
ibd_stats.hashSerialized.ToString(),
|
||||
au_data.hash_serialized.ToString());
|
||||
handle_invalid_snapshot();
|
||||
@ -6334,8 +6334,8 @@ util::Result<void> Chainstate::InvalidateCoinsDBOnDisk()
|
||||
auto src_str = fs::PathToString(snapshot_datadir);
|
||||
auto dest_str = fs::PathToString(invalid_path);
|
||||
|
||||
LogPrintf("%s: error renaming file '%s' -> '%s': %s\n",
|
||||
__func__, src_str, dest_str, e.what());
|
||||
LogError("While invalidating the coins db: Error renaming file '%s' -> '%s': %s",
|
||||
src_str, dest_str, e.what());
|
||||
return util::Error{strprintf(_(
|
||||
"Rename of '%s' -> '%s' failed. "
|
||||
"You should resolve this by manually moving or deleting the invalid "
|
||||
@ -6354,7 +6354,7 @@ bool ChainstateManager::DeleteSnapshotChainstate()
|
||||
|
||||
fs::path snapshot_datadir = Assert(node::FindSnapshotChainstateDir(m_options.datadir)).value();
|
||||
if (!DeleteCoinsDBFromDisk(snapshot_datadir, /*is_snapshot=*/ true)) {
|
||||
LogPrintf("Deletion of %s failed. Please remove it manually to continue reindexing.\n",
|
||||
LogError("Deletion of %s failed. Please remove it manually to continue reindexing.",
|
||||
fs::PathToString(snapshot_datadir));
|
||||
return false;
|
||||
}
|
||||
@ -6416,8 +6416,8 @@ bool ChainstateManager::ValidatedSnapshotCleanup()
|
||||
// is in-memory, in which case we can't do on-disk cleanup. You'd better be
|
||||
// in a unittest!
|
||||
if (!ibd_chainstate_path_maybe || !snapshot_chainstate_path_maybe) {
|
||||
LogPrintf("[snapshot] snapshot chainstate cleanup cannot happen with "
|
||||
"in-memory chainstates. You are testing, right?\n");
|
||||
LogError("[snapshot] snapshot chainstate cleanup cannot happen with "
|
||||
"in-memory chainstates. You are testing, right?");
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -6473,8 +6473,8 @@ bool ChainstateManager::ValidatedSnapshotCleanup()
|
||||
if (!DeleteCoinsDBFromDisk(tmp_old, /*is_snapshot=*/false)) {
|
||||
// No need to FatalError because once the unneeded bg chainstate data is
|
||||
// moved, it will not interfere with subsequent initialization.
|
||||
LogPrintf("Deletion of %s failed. Please remove it manually, as the "
|
||||
"directory is now unnecessary.\n",
|
||||
LogWarning("Deletion of %s failed. Please remove it manually, as the "
|
||||
"directory is now unnecessary.",
|
||||
fs::PathToString(tmp_old));
|
||||
} else {
|
||||
LogInfo("[snapshot] deleted background chainstate directory (%s)",
|
||||
|
||||
@ -718,7 +718,7 @@ bool BerkeleyRODatabase::Backup(const std::string& dest) const
|
||||
}
|
||||
try {
|
||||
if (fs::exists(dst) && fs::equivalent(src, dst)) {
|
||||
LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(dst));
|
||||
LogWarning("cannot backup to wallet source file %s", fs::PathToString(dst));
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@ -240,7 +240,7 @@ bool LegacyDataSPKM::CheckDecryptionKey(const CKeyingMaterial& master_key)
|
||||
}
|
||||
if (keyPass && keyFail)
|
||||
{
|
||||
LogPrintf("The wallet is probably corrupted: Some keys decrypt but not all.\n");
|
||||
LogWarning("The wallet is probably corrupted: Some keys decrypt but not all.");
|
||||
throw std::runtime_error("Error unlocking wallet: some keys decrypt but not all. Your wallet file may be corrupt.");
|
||||
}
|
||||
if (keyFail || !keyPass)
|
||||
@ -570,7 +570,7 @@ std::optional<MigrationData> LegacyDataSPKM::MigrateToDescriptor()
|
||||
|
||||
WalletBatch batch(m_storage.GetDatabase());
|
||||
if (!batch.TxnBegin()) {
|
||||
LogPrintf("Error generating descriptors for migration, cannot initialize db transaction\n");
|
||||
LogWarning("Error generating descriptors for migration, cannot initialize db transaction");
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
@ -755,7 +755,7 @@ std::optional<MigrationData> LegacyDataSPKM::MigrateToDescriptor()
|
||||
|
||||
// Make sure that we have accounted for all scriptPubKeys
|
||||
if (!Assume(spks.empty())) {
|
||||
LogPrintf("%s\n", STR_INTERNAL_BUG("Error: Some output scripts were not migrated.\n"));
|
||||
LogError("%s", STR_INTERNAL_BUG("Error: Some output scripts were not migrated."));
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
@ -809,7 +809,7 @@ std::optional<MigrationData> LegacyDataSPKM::MigrateToDescriptor()
|
||||
|
||||
// Finalize transaction
|
||||
if (!batch.TxnCommit()) {
|
||||
LogPrintf("Error generating descriptors for migration, cannot commit db transaction\n");
|
||||
LogWarning("Error generating descriptors for migration, cannot commit db transaction");
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
@ -889,7 +889,7 @@ bool DescriptorScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master
|
||||
break;
|
||||
}
|
||||
if (keyPass && keyFail) {
|
||||
LogPrintf("The wallet is probably corrupted: Some keys decrypt but not all.\n");
|
||||
LogWarning("The wallet is probably corrupted: Some keys decrypt but not all.");
|
||||
throw std::runtime_error("Error unlocking wallet: some keys decrypt but not all. Your wallet file may be corrupt.");
|
||||
}
|
||||
if (keyFail || !keyPass) {
|
||||
|
||||
@ -40,7 +40,7 @@ static void ErrorLogCallback(void* arg, int code, const char* msg)
|
||||
// invoked."
|
||||
// Assert that this is the case:
|
||||
assert(arg == nullptr);
|
||||
LogPrintf("SQLite Error. Code: %d. Message: %s\n", code, msg);
|
||||
LogWarning("SQLite Error. Code: %d. Message: %s", code, msg);
|
||||
}
|
||||
|
||||
static int TraceSqlCallback(unsigned code, void* context, void* param1, void* param2)
|
||||
@ -69,7 +69,7 @@ static bool BindBlobToStatement(sqlite3_stmt* stmt,
|
||||
// instead of the empty blob value X'', which would mess up SQL comparisons.
|
||||
int res = sqlite3_bind_blob(stmt, index, blob.data() ? static_cast<const void*>(blob.data()) : "", blob.size(), SQLITE_STATIC);
|
||||
if (res != SQLITE_OK) {
|
||||
LogPrintf("Unable to bind %s to statement: %s\n", description, sqlite3_errstr(res));
|
||||
LogWarning("Unable to bind %s to statement: %s", description, sqlite3_errstr(res));
|
||||
sqlite3_clear_bindings(stmt);
|
||||
sqlite3_reset(stmt);
|
||||
return false;
|
||||
@ -179,7 +179,7 @@ void SQLiteDatabase::Cleanup() noexcept
|
||||
if (--g_sqlite_count == 0) {
|
||||
int ret = sqlite3_shutdown();
|
||||
if (ret != SQLITE_OK) {
|
||||
LogPrintf("SQLiteDatabase: Failed to shutdown SQLite: %s\n", sqlite3_errstr(ret));
|
||||
LogWarning("SQLiteDatabase: Failed to shutdown SQLite: %s", sqlite3_errstr(ret));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -264,7 +264,7 @@ void SQLiteDatabase::Open()
|
||||
if (LogAcceptCategory(BCLog::WALLETDB, BCLog::Level::Trace)) {
|
||||
ret = sqlite3_trace_v2(m_db, SQLITE_TRACE_STMT, TraceSqlCallback, this);
|
||||
if (ret != SQLITE_OK) {
|
||||
LogPrintf("Failed to enable SQL tracing for %s\n", Filename());
|
||||
LogWarning("Failed to enable SQL tracing for %s", Filename());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -291,7 +291,7 @@ void SQLiteDatabase::Open()
|
||||
|
||||
if (m_use_unsafe_sync) {
|
||||
// Use normal synchronous mode for the journal
|
||||
LogPrintf("WARNING SQLite is configured to not wait for data to be flushed to disk. Data loss and corruption may occur.\n");
|
||||
LogWarning("SQLite is configured to not wait for data to be flushed to disk. Data loss and corruption may occur.");
|
||||
SetPragma(m_db, "synchronous", "OFF", "Failed to set synchronous mode to OFF");
|
||||
}
|
||||
|
||||
@ -350,14 +350,14 @@ bool SQLiteDatabase::Backup(const std::string& dest) const
|
||||
}
|
||||
sqlite3_backup* backup = sqlite3_backup_init(db_copy, "main", m_db, "main");
|
||||
if (!backup) {
|
||||
LogPrintf("%s: Unable to begin backup: %s\n", __func__, sqlite3_errmsg(m_db));
|
||||
LogWarning("Unable to begin sqlite backup: %s", sqlite3_errmsg(m_db));
|
||||
sqlite3_close(db_copy);
|
||||
return false;
|
||||
}
|
||||
// Specifying -1 will copy all of the pages
|
||||
res = sqlite3_backup_step(backup, -1);
|
||||
if (res != SQLITE_DONE) {
|
||||
LogPrintf("%s: Unable to backup: %s\n", __func__, sqlite3_errstr(res));
|
||||
LogWarning("Unable to continue sqlite backup: %s", sqlite3_errstr(res));
|
||||
sqlite3_backup_finish(backup);
|
||||
sqlite3_close(db_copy);
|
||||
return false;
|
||||
@ -409,13 +409,13 @@ void SQLiteBatch::Close()
|
||||
// If we began a transaction, and it wasn't committed, abort the transaction in progress
|
||||
if (m_txn) {
|
||||
if (TxnAbort()) {
|
||||
LogPrintf("SQLiteBatch: Batch closed unexpectedly without the transaction being explicitly committed or aborted\n");
|
||||
LogWarning("SQLiteBatch: Batch closed unexpectedly without the transaction being explicitly committed or aborted");
|
||||
} else {
|
||||
// If transaction cannot be aborted, it means there is a bug or there has been data corruption. Try to recover in this case
|
||||
// by closing and reopening the database. Closing the database should also ensure that any changes made since the transaction
|
||||
// was opened will be rolled back and future transactions can succeed without committing old data.
|
||||
force_conn_refresh = true;
|
||||
LogPrintf("SQLiteBatch: Batch closed and failed to abort transaction, resetting db connection..\n");
|
||||
LogWarning("SQLiteBatch: Batch closed and failed to abort transaction, resetting db connection..");
|
||||
}
|
||||
}
|
||||
|
||||
@ -431,7 +431,7 @@ void SQLiteBatch::Close()
|
||||
for (const auto& [stmt_prepared, stmt_description] : statements) {
|
||||
int res = sqlite3_finalize(*stmt_prepared);
|
||||
if (res != SQLITE_OK) {
|
||||
LogPrintf("SQLiteBatch: Batch closed but could not finalize %s statement: %s\n",
|
||||
LogWarning("SQLiteBatch: Batch closed but could not finalize %s statement: %s",
|
||||
stmt_description, sqlite3_errstr(res));
|
||||
}
|
||||
*stmt_prepared = nullptr;
|
||||
@ -462,7 +462,7 @@ bool SQLiteBatch::ReadKey(DataStream&& key, DataStream& value)
|
||||
if (res != SQLITE_ROW) {
|
||||
if (res != SQLITE_DONE) {
|
||||
// SQLITE_DONE means "not found", don't log an error in that case.
|
||||
LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res));
|
||||
LogWarning("Unable to execute read statement: %s", sqlite3_errstr(res));
|
||||
}
|
||||
sqlite3_clear_bindings(m_read_stmt);
|
||||
sqlite3_reset(m_read_stmt);
|
||||
@ -502,7 +502,7 @@ bool SQLiteBatch::WriteKey(DataStream&& key, DataStream&& value, bool overwrite)
|
||||
sqlite3_clear_bindings(stmt);
|
||||
sqlite3_reset(stmt);
|
||||
if (res != SQLITE_DONE) {
|
||||
LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res));
|
||||
LogWarning("Unable to execute write statement: %s", sqlite3_errstr(res));
|
||||
}
|
||||
|
||||
if (!m_txn) m_database.m_write_semaphore.release();
|
||||
@ -526,7 +526,7 @@ bool SQLiteBatch::ExecStatement(sqlite3_stmt* stmt, std::span<const std::byte> b
|
||||
sqlite3_clear_bindings(stmt);
|
||||
sqlite3_reset(stmt);
|
||||
if (res != SQLITE_DONE) {
|
||||
LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res));
|
||||
LogWarning("Unable to execute exec statement: %s", sqlite3_errstr(res));
|
||||
}
|
||||
|
||||
if (!m_txn) m_database.m_write_semaphore.release();
|
||||
@ -564,7 +564,7 @@ DatabaseCursor::Status SQLiteCursor::Next(DataStream& key, DataStream& value)
|
||||
return Status::DONE;
|
||||
}
|
||||
if (res != SQLITE_ROW) {
|
||||
LogPrintf("%s: Unable to execute cursor step: %s\n", __func__, sqlite3_errstr(res));
|
||||
LogWarning("Unable to execute cursor step: %s", sqlite3_errstr(res));
|
||||
return Status::FAIL;
|
||||
}
|
||||
|
||||
@ -583,8 +583,8 @@ SQLiteCursor::~SQLiteCursor()
|
||||
sqlite3_reset(m_cursor_stmt);
|
||||
int res = sqlite3_finalize(m_cursor_stmt);
|
||||
if (res != SQLITE_OK) {
|
||||
LogPrintf("%s: cursor closed but could not finalize cursor statement: %s\n",
|
||||
__func__, sqlite3_errstr(res));
|
||||
LogWarning("Cursor closed but could not finalize cursor statement: %s",
|
||||
sqlite3_errstr(res));
|
||||
}
|
||||
}
|
||||
|
||||
@ -652,7 +652,7 @@ bool SQLiteBatch::TxnBegin()
|
||||
Assert(!m_database.HasActiveTxn());
|
||||
int res = Assert(m_exec_handler)->Exec(m_database, "BEGIN TRANSACTION");
|
||||
if (res != SQLITE_OK) {
|
||||
LogPrintf("SQLiteBatch: Failed to begin the transaction\n");
|
||||
LogWarning("SQLiteBatch: Failed to begin the transaction");
|
||||
m_database.m_write_semaphore.release();
|
||||
} else {
|
||||
m_txn = true;
|
||||
@ -666,7 +666,7 @@ bool SQLiteBatch::TxnCommit()
|
||||
Assert(m_database.HasActiveTxn());
|
||||
int res = Assert(m_exec_handler)->Exec(m_database, "COMMIT TRANSACTION");
|
||||
if (res != SQLITE_OK) {
|
||||
LogPrintf("SQLiteBatch: Failed to commit the transaction\n");
|
||||
LogWarning("SQLiteBatch: Failed to commit the transaction");
|
||||
} else {
|
||||
m_txn = false;
|
||||
m_database.m_write_semaphore.release();
|
||||
@ -680,7 +680,7 @@ bool SQLiteBatch::TxnAbort()
|
||||
Assert(m_database.HasActiveTxn());
|
||||
int res = Assert(m_exec_handler)->Exec(m_database, "ROLLBACK TRANSACTION");
|
||||
if (res != SQLITE_OK) {
|
||||
LogPrintf("SQLiteBatch: Failed to abort the transaction\n");
|
||||
LogWarning("SQLiteBatch: Failed to abort the transaction");
|
||||
} else {
|
||||
m_txn = false;
|
||||
m_database.m_write_semaphore.release();
|
||||
|
||||
@ -129,7 +129,7 @@ class ConfArgsTest(BitcoinTestFramework):
|
||||
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
|
||||
conf.write('reindex=1\n')
|
||||
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=['Warning: reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary']):
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["[warning] reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary"]):
|
||||
self.start_node(0)
|
||||
self.stop_node(0)
|
||||
|
||||
@ -229,7 +229,7 @@ class ConfArgsTest(BitcoinTestFramework):
|
||||
)
|
||||
|
||||
def test_log_buffer(self):
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -listen=0\n']):
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["[warning] Parsed potentially confusing double-negative -listen=0\n"]):
|
||||
self.start_node(0, extra_args=['-nolisten=0'])
|
||||
self.stop_node(0)
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user