allow the same file for input and output (append), use std filesystem, fix docs
ThomasBrady committed Oct 3, 2024
1 parent a619942 commit 4de2273
Showing 4 changed files with 88 additions and 37 deletions.
2 changes: 1 addition & 1 deletion docs/software/commands.md
@@ -219,7 +219,7 @@ apply.
to write the trusted checkpoint hashes to. The file will contain a JSON array
of arrays, where each inner array contains the ledger number and the corresponding
checkpoint hash of the form `[[999, "hash-abc"], [935, "hash-def"], ... [0, "hash-xyz"]]`.
* Option **--trusted-checkpoint-hashes <FILE-NAME>** is optional. If provided,
* Option **--trusted-checkpoint-file <FILE-NAME>** is optional. If provided,
stellar-core will parse the latest checkpoint ledger number and hash from the file and verify from this ledger to the latest checkpoint ledger obtained from the network.
* Option **--from-ledger <LEDGER-NUMBER>** is optional and specifies the ledger
number to start the verification from.
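As a worked illustration of the file format described above, here is a minimal sketch (not part of this commit) that reads the first entry, which is the latest checkpoint, back with jsoncpp, roughly mirroring `WriteVerifiedCheckpointHashesWork::loadLatestHashPairFromJsonOutput` further down in this diff. The file name `checkpoints.json` and the standalone `main` are assumptions made for the example.

```cpp
// Minimal sketch: read the latest [ledger, hash] pair from a file in the
// format described above. Assumes jsoncpp (<json/json.h>) and an existing
// "checkpoints.json" produced by verify-checkpoints.
#include <fstream>
#include <iostream>
#include <json/json.h>

int main()
{
    std::ifstream in("checkpoints.json");
    Json::Value root;
    Json::Reader rdr;
    if (!in || !rdr.parse(in, root) || !root.isArray() || root.empty())
    {
        std::cerr << "missing or malformed checkpoint-hashes file\n";
        return 1;
    }
    // The newest checkpoint is the first element of the top-level array.
    Json::Value const& latest = root[0];
    if (!latest.isArray() || latest.size() != 2)
    {
        std::cerr << "expected a 2-element [ledger, hash] pair\n";
        return 1;
    }
    std::cout << "ledger " << latest[0].asUInt() << " hash "
              << latest[1].asString() << "\n";
    return 0;
}
```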
88 changes: 60 additions & 28 deletions src/historywork/WriteVerifiedCheckpointHashesWork.cpp
@@ -22,63 +22,67 @@ namespace stellar
{
std::optional<LedgerNumHashPair>
WriteVerifiedCheckpointHashesWork::loadLatestHashPairFromJsonOutput(
std::string const& filename)
std::filesystem::path const& path)
{
if (!std::filesystem::exists(filename))
if (!std::filesystem::exists(path))
{
return std::nullopt;
throw std::runtime_error("file not found: " + path.string());
}

std::ifstream in(filename);
std::ifstream in(path);
Json::Value root;
Json::Reader rdr;
if (!rdr.parse(in, root))
{
throw std::runtime_error("failed to parse JSON input " + filename);
throw std::runtime_error("failed to parse JSON input " + path.string());
}
if (!root.isArray())
{
throw std::runtime_error("expected top-level array in " + filename);
throw std::runtime_error("expected top-level array in " +
path.string());
}
if (root.size() < 1)
if (root.size() < 2)
{
return std::nullopt;
throw std::runtime_error(
"expected at least one trusted ledger, hash pair in " +
path.string());
}
// Latest hash is the first element in the array.
auto const& jpair = root[0];
if (!jpair.isArray() || (jpair.size() != 2))
{
throw std::runtime_error("expecting 2-element sub-array in " +
filename);
path.string());
}
return {{jpair[0].asUInt(), hexToBin256(jpair[1].asString())}};
}

Hash
WriteVerifiedCheckpointHashesWork::loadHashFromJsonOutput(
uint32_t seq, std::string const& filename)
uint32_t seq, std::filesystem::path const& path)
{
std::ifstream in(filename);
std::ifstream in(path);
if (!in)
{
throw std::runtime_error("error opening " + filename);
throw std::runtime_error("error opening " + path.string());
}
Json::Value root;
Json::Reader rdr;
if (!rdr.parse(in, root))
{
throw std::runtime_error("failed to parse JSON input " + filename);
throw std::runtime_error("failed to parse JSON input " + path.string());
}
if (!root.isArray())
{
throw std::runtime_error("expected top-level array in " + filename);
throw std::runtime_error("expected top-level array in " +
path.string());
}
for (auto const& jpair : root)
{
if (!jpair.isArray() || (jpair.size() != 2))
{
throw std::runtime_error("expecting 2-element sub-array in " +
filename);
path.string());
}
if (jpair[0].asUInt() == seq)
{
@@ -89,8 +93,9 @@ WriteVerifiedCheckpointHashesWork::loadHashFromJsonOutput(
}

WriteVerifiedCheckpointHashesWork::WriteVerifiedCheckpointHashesWork(
Application& app, LedgerNumHashPair rangeEnd, std::string const& outputFile,
std::optional<std::string> const& trustedHashFile,
Application& app, LedgerNumHashPair rangeEnd,
std::filesystem::path const& outputFile,
std::optional<std::filesystem::path> const& trustedHashFile,
std::optional<uint32_t> const& fromLedger, uint32_t nestedBatchSize,
std::shared_ptr<HistoryArchive> archive)
: BatchWork(app, "write-verified-checkpoint-hashes")
@@ -100,8 +105,8 @@ WriteVerifiedCheckpointHashesWork::WriteVerifiedCheckpointHashesWork(
, mRangeEndFuture(mRangeEndPromise.get_future().share())
, mCurrCheckpoint(rangeEnd.first)
, mArchive(archive)
, mTrustedHashFileName(trustedHashFile)
, mOutputFileName(outputFile)
, mTrustedHashPath(trustedHashFile)
, mOutputPath(outputFile)
, mFromLedger(fromLedger)
{
mRangeEndPromise.set_value(mRangeEnd);
@@ -238,25 +243,35 @@ WriteVerifiedCheckpointHashesWork::startOutputFile()
{
releaseAssert(!mOutputFile);
auto mode = std::ios::out | std::ios::trunc;
mOutputFile = std::make_shared<std::ofstream>(mOutputFileName, mode);
// If the output file is the same as the trusted hash file, write to a
// temporary file first.
// In endOutputFile we will rename the temporary file to the trusted hash
// file name.
if (mTrustedHashPath && mOutputPath == *mTrustedHashPath)
{
mAppendToFile = true;
mOutputPath += ".tmp";
}
mOutputFile = std::make_shared<std::ofstream>(mOutputPath, mode);
if (!*mOutputFile)
{
throw std::runtime_error("error opening output file " +
mOutputFileName);
mOutputPath.string());
}
(*mOutputFile) << "[";
}

void
WriteVerifiedCheckpointHashesWork::maybeParseTrustedHashFile()
{
if (mFromLedger || !mTrustedHashFileName)
if (mFromLedger || !mTrustedHashPath)
{
return;
}
mLatestTrustedHashPair =
loadLatestHashPairFromJsonOutput(*mTrustedHashFileName);
CLOG_INFO(History, "trusted hash from {}: {}", *mTrustedHashFileName,
loadLatestHashPairFromJsonOutput(*mTrustedHashPath);
CLOG_INFO(History, "trusted hash from {}. Ledger Seq: {} Hash: {}",
*mTrustedHashPath, mLatestTrustedHashPair->first,
hexAbbrev(*mLatestTrustedHashPair->second));
}

@@ -265,12 +280,11 @@ WriteVerifiedCheckpointHashesWork::endOutputFile()
{
if (mOutputFile && mOutputFile->is_open())
{
if (mTrustedHashFileName &&
std::filesystem::exists(*mTrustedHashFileName))
if (mTrustedHashPath && std::filesystem::exists(*mTrustedHashPath))
{
// Append everything except the first line of mTrustedHashFile to
// mOutputFile.
std::ifstream trustedHashFile(*mTrustedHashFileName);
std::ifstream trustedHashFile(*mTrustedHashPath);
if (trustedHashFile)
{
std::string line;
@@ -286,7 +300,7 @@ WriteVerifiedCheckpointHashesWork::endOutputFile()
else
{
CLOG_WARNING(History, "failed to open trusted hash file {}",
*mTrustedHashFileName);
*mTrustedHashPath);
}
}
else
Expand All @@ -300,6 +314,24 @@ WriteVerifiedCheckpointHashesWork::endOutputFile()
}
mOutputFile->close();
mOutputFile.reset();
if (mAppendToFile)
{
if (!std::filesystem::exists(*mTrustedHashPath))
{
CLOG_ERROR(History, "trusted hash file {} does not exist",
*mTrustedHashPath);
return;
}
if (!std::filesystem::exists(mOutputPath))
{
CLOG_ERROR(History, "output file {} does not exist",
mOutputPath);
return;
}
// The output file was written to a temporary file, so rename it to
// the trusted hash file name.
std::filesystem::rename(mOutputPath, *mTrustedHashPath);
}
}
}

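The behavioural core of this file's change is that `--output-file` may now name the same file as `--trusted-hash-file`: the work then writes to a `.tmp` sibling and renames it over the trusted file once verification succeeds. Below is a minimal, self-contained sketch of that pattern; the function name, paths, and placeholder payload are assumptions for illustration, not the work's actual output.

```cpp
// Sketch of the write-to-temporary-then-rename pattern used by
// startOutputFile()/endOutputFile() above when the output path and the
// trusted-hash path refer to the same file.
#include <filesystem>
#include <fstream>

void
writeCheckpointFile(std::filesystem::path outputPath,
                    std::filesystem::path const& trustedPath)
{
    bool const sameFile = (outputPath == trustedPath);
    if (sameFile)
    {
        // Keep the trusted file readable while the new output is written.
        outputPath += ".tmp";
    }
    {
        std::ofstream out(outputPath, std::ios::out | std::ios::trunc);
        out << "[\n[999, \"hash-abc\"]\n]\n"; // placeholder payload
    } // close the stream before renaming
    if (sameFile)
    {
        // POSIX rename semantics: an existing destination is replaced.
        std::filesystem::rename(outputPath, trustedPath);
    }
}
```

This mirrors the `mAppendToFile`/`mOutputPath += ".tmp"` logic in `startOutputFile()` and the `std::filesystem::rename` call in `endOutputFile()`.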
18 changes: 11 additions & 7 deletions src/historywork/WriteVerifiedCheckpointHashesWork.h
@@ -27,20 +27,20 @@ class WriteVerifiedCheckpointHashesWork : public BatchWork
public:
WriteVerifiedCheckpointHashesWork(
Application& app, LedgerNumHashPair rangeEnd,
std::string const& outputFile,
std::optional<std::string> const& trustedHashFile,
std::filesystem::path const& outputFile,
std::optional<std::filesystem::path> const& trustedHashFile,
std::optional<uint32_t> const& fromLedger,
uint32_t nestedBatchSize = NESTED_DOWNLOAD_BATCH_SIZE,
std::shared_ptr<HistoryArchive> archive = nullptr);
~WriteVerifiedCheckpointHashesWork();

// Helper to load a hash back from a file produced by this class.
static Hash loadHashFromJsonOutput(uint32_t seq,
std::string const& filename);
std::filesystem::path const& path);
// Helper to load the latest hash back from a file produced by this class.
// Throws if the file does not exist or cannot be parsed.
static std::optional<LedgerNumHashPair>
loadLatestHashPairFromJsonOutput(std::string const& filename);
loadLatestHashPairFromJsonOutput(std::filesystem::path const& path);

void onSuccess() override;

@@ -85,9 +85,13 @@ class WriteVerifiedCheckpointHashesWork : public BatchWork
void startOutputFile();
void endOutputFile();
std::shared_ptr<std::ofstream> mOutputFile;
std::optional<std::string> const mTrustedHashFileName;
std::string const mOutputFileName;
std::optional<std::filesystem::path> const mTrustedHashPath;
std::filesystem::path mOutputPath;
// If true, mOutputPath == mTrustedHashPath, and output
// will be written to a temporary file before being renamed to
// mOutputPath when verification is complete.
bool mAppendToFile = false;
std::optional<LedgerNumHashPair> mLatestTrustedHashPair;
std::optional<uint32_t> const& mFromLedger;
std::optional<uint32_t> const mFromLedger;
};
}
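For reference, a hedged usage sketch of the two static helpers declared above, now taking `std::filesystem::path`; the include path, file name, and ledger number are assumptions for the example.

```cpp
// Hypothetical call sites for the helpers declared above.
#include "historywork/WriteVerifiedCheckpointHashesWork.h"

using namespace stellar;

void
exampleLookups()
{
    std::filesystem::path const checkpoints{"checkpoints.json"};

    // Latest trusted [ledger, hash] pair, i.e. the first entry of the file;
    // with this change it throws if the file is missing or malformed.
    std::optional<LedgerNumHashPair> latest =
        WriteVerifiedCheckpointHashesWork::loadLatestHashPairFromJsonOutput(
            checkpoints);

    // Hash recorded for one specific checkpoint ledger (999 is illustrative).
    Hash h = WriteVerifiedCheckpointHashesWork::loadHashFromJsonOutput(
        999, checkpoints);
}
```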
17 changes: 16 additions & 1 deletion src/main/CommandLine.cpp
@@ -211,7 +211,7 @@ trustedHashFileParser(std::optional<std::string>& string)
return clara::Opt{[&](std::string const& arg) { string = arg; },
"FILE-NAME"}["--trusted-hash-file"](
"file containing trusted hashes, generated by a previous call to "
"verify-checkpoints or a non-existent file to generate a new one");
"verify-checkpoints");
}

clara::Opt
@@ -1043,12 +1043,27 @@ runWriteVerifiedCheckpointHashes(CommandLineArgs const& args)
trustedHashFileParser(trustedHashFile),
outputFileParser(outputFile).required()},
[&] {
if (outputFile.empty())
{
LOG_ERROR(DEFAULT_LOG, "Must specify --output-file");
return 1;
}
if (fromLedger && trustedHashFile)
{
LOG_ERROR(DEFAULT_LOG, "Cannot specify both --from-ledger and "
"--trusted-hash-file");
return 1;
}
if (trustedHashFile)
{
if (!std::filesystem::exists(*trustedHashFile))
{
LOG_ERROR(DEFAULT_LOG,
"--trusted-hash-file {} does not exist",
*trustedHashFile);
return 1;
}
}
VirtualClock clock(VirtualClock::REAL_TIME);
auto cfg = configOption.getConfig();

