This commit is contained in:
Ava Chow 2024-04-29 04:33:50 +02:00 committed by GitHub
commit 6fab190757
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
26 changed files with 1614 additions and 154 deletions

View File

@ -348,6 +348,7 @@ BITCOIN_CORE_H = \
wallet/feebumper.h \
wallet/fees.h \
wallet/load.h \
wallet/migrate.h \
wallet/receive.h \
wallet/rpc/util.h \
wallet/rpc/wallet.h \
@ -508,6 +509,7 @@ libbitcoin_wallet_a_SOURCES = \
wallet/fees.cpp \
wallet/interfaces.cpp \
wallet/load.cpp \
wallet/migrate.cpp \
wallet/receive.cpp \
wallet/rpc/addresses.cpp \
wallet/rpc/backup.cpp \

View File

@ -204,7 +204,8 @@ FUZZ_WALLET_SRC = \
wallet/test/fuzz/coincontrol.cpp \
wallet/test/fuzz/coinselection.cpp \
wallet/test/fuzz/fees.cpp \
wallet/test/fuzz/parse_iso8601.cpp
wallet/test/fuzz/parse_iso8601.cpp \
wallet/test/fuzz/wallet_bdb_parser.cpp
if USE_SQLITE
FUZZ_WALLET_SRC += \

View File

@ -42,6 +42,7 @@ static void SetupWalletToolArgs(ArgsManager& argsman)
argsman.AddArg("-legacy", "Create legacy wallet. Only for 'create'", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-format=<format>", "The format of the wallet file to create. Either \"bdb\" or \"sqlite\". Only used with 'createfromdump'", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-withinternalbdb", "Use the internal Berkeley DB parser when dumping a Berkeley DB wallet file (default: false)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
argsman.AddCommand("info", "Get wallet info");
argsman.AddCommand("create", "Create new wallet file");

View File

@ -53,6 +53,7 @@ void DummyWalletInit::AddWalletOptions(ArgsManager& argsman) const
"-walletrejectlongchains",
"-walletcrosschain",
"-unsafesqlitesync",
"-swapbdbendian",
});
}

View File

@ -21,6 +21,28 @@ std::size_t AutoFile::detail_fread(Span<std::byte> dst)
}
}
// Reposition the underlying FILE* to `offset` relative to `origin`
// (SEEK_SET/SEEK_CUR/SEEK_END). Throws std::ios_base::failure if the
// handle is null or the fseek fails.
void AutoFile::seek(int64_t offset, int origin)
{
    if (IsNull()) {
        throw std::ios_base::failure("AutoFile::seek: file handle is nullptr");
    }
    const int err{std::fseek(m_file, offset, origin)};
    if (err != 0) {
        const char* reason = feof() ? "AutoFile::seek: end of file" : "AutoFile::seek: fseek failed";
        throw std::ios_base::failure(reason);
    }
}
// Return the current position in the underlying FILE*. Throws
// std::ios_base::failure if the handle is null or ftell reports an error.
int64_t AutoFile::tell()
{
    if (IsNull()) {
        throw std::ios_base::failure("AutoFile::tell: file handle is nullptr");
    }
    const int64_t pos{std::ftell(m_file)};
    if (pos < 0) {
        throw std::ios_base::failure("AutoFile::tell: ftell failed");
    }
    return pos;
}
void AutoFile::read(Span<std::byte> dst)
{
if (detail_fread(dst) != dst.size()) {

View File

@ -435,6 +435,9 @@ public:
/** Implementation detail, only used internally. */
std::size_t detail_fread(Span<std::byte> dst);
void seek(int64_t offset, int origin);
int64_t tell();
//
// Stream subset
//

View File

@ -300,7 +300,11 @@ static Span<const std::byte> SpanFromDbt(const SafeDbt& dbt)
}
BerkeleyDatabase::BerkeleyDatabase(std::shared_ptr<BerkeleyEnvironment> env, fs::path filename, const DatabaseOptions& options) :
WalletDatabase(), env(std::move(env)), m_filename(std::move(filename)), m_max_log_mb(options.max_log_mb)
WalletDatabase(),
env(std::move(env)),
m_byteswap(options.require_format == DatabaseFormat::BERKELEY_SWAP),
m_filename(std::move(filename)),
m_max_log_mb(options.max_log_mb)
{
auto inserted = this->env->m_databases.emplace(m_filename, std::ref(*this));
assert(inserted.second);
@ -389,6 +393,10 @@ void BerkeleyDatabase::Open()
}
}
if (m_byteswap) {
pdb_temp->set_lorder(std::endian::native == std::endian::little ? 4321 : 1234);
}
ret = pdb_temp->open(nullptr, // Txn pointer
fMockDb ? nullptr : strFile.c_str(), // Filename
fMockDb ? strFile.c_str() : "main", // Logical db name
@ -521,6 +529,10 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip)
BerkeleyBatch db(*this, true);
std::unique_ptr<Db> pdbCopy = std::make_unique<Db>(env->dbenv.get(), 0);
if (m_byteswap) {
pdbCopy->set_lorder(std::endian::native == std::endian::little ? 4321 : 1234);
}
int ret = pdbCopy->open(nullptr, // Txn pointer
strFileRes.c_str(), // Filename
"main", // Logical db name

View File

@ -147,6 +147,9 @@ public:
/** Database pointer. This is initialized lazily and reset during flushes, so it can be null. */
std::unique_ptr<Db> m_db;
// Whether to byteswap
bool m_byteswap;
fs::path m_filename;
int64_t m_max_log_mb;

View File

@ -16,6 +16,9 @@
#include <vector>
namespace wallet {
// Prefix-aware ordering: a BytePrefix compares against only the first
// min(prefix-length, span-length) bytes of a byte span, so that
// equal_range(BytePrefix{p}) on an ordered map selects every record whose
// key begins with p (see BerkeleyROCursor).
bool operator<(BytePrefix a, Span<const std::byte> b) { return a.prefix < b.subspan(0, std::min(a.prefix.size(), b.size())); }
bool operator<(Span<const std::byte> a, BytePrefix b) { return a.subspan(0, std::min(a.size(), b.prefix.size())) < b.prefix; }
std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
{
std::vector<fs::path> paths;

View File

@ -20,6 +20,10 @@ class ArgsManager;
struct bilingual_str;
namespace wallet {
// BytePrefix compares equality with other byte spans that begin with the same prefix.
struct BytePrefix { Span<const std::byte> prefix; };
bool operator<(BytePrefix a, Span<const std::byte> b);
bool operator<(Span<const std::byte> a, BytePrefix b);
class DatabaseCursor
{
@ -177,6 +181,8 @@ public:
enum class DatabaseFormat {
BERKELEY,
SQLITE,
BERKELEY_RO,
BERKELEY_SWAP,
};
struct DatabaseOptions {

View File

@ -60,7 +60,13 @@ bool DumpWallet(const ArgsManager& args, WalletDatabase& db, bilingual_str& erro
hasher << Span{line};
// Write out the file format
line = strprintf("%s,%s\n", "format", db.Format());
std::string format = db.Format();
// BDB files that are opened using BerkeleyRODatabase have their format set to "bdb_ro"
// We want to override that format back to "bdb"
if (format == "bdb_ro") {
format = "bdb";
}
line = strprintf("%s,%s\n", "format", format);
dump_file.write(line.data(), line.size());
hasher << Span{line};
@ -180,6 +186,8 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs::
data_format = DatabaseFormat::BERKELEY;
} else if (file_format == "sqlite") {
data_format = DatabaseFormat::SQLITE;
} else if (file_format == "bdb_swap") {
data_format = DatabaseFormat::BERKELEY_SWAP;
} else {
error = strprintf(_("Unknown wallet file format \"%s\" provided. Please provide one of \"bdb\" or \"sqlite\"."), file_format);
return false;

View File

@ -87,8 +87,9 @@ void WalletInit::AddWalletOptions(ArgsManager& argsman) const
argsman.AddArg("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DatabaseOptions().max_log_mb), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
argsman.AddArg("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
argsman.AddArg("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", !DatabaseOptions().use_shared_memory), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
argsman.AddArg("-swapbdbendian", "Swaps the internal endianness of BDB wallet databases (default: false)", ArgsManager:: ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST);
#else
argsman.AddHiddenArgs({"-dblogsize", "-flushwallet", "-privdb"});
argsman.AddHiddenArgs({"-dblogsize", "-flushwallet", "-privdb", "-swapbdbendian"});
#endif
#ifdef USE_SQLITE

786
src/wallet/migrate.cpp Normal file
View File

@ -0,0 +1,786 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <compat/byteswap.h>
#include <crypto/common.h> // For ReadBE32
#include <logging.h>
#include <streams.h>
#include <util/translation.h>
#include <wallet/migrate.h>
#include <optional>
#include <variant>
namespace wallet {
// Magic bytes in both endianness's
constexpr uint32_t BTREE_MAGIC = 0x00053162; // If the file endianness matches our system, we see this magic
constexpr uint32_t BTREE_MAGIC_OE = 0x62310500; // If the file endianness is the other one, we will see this magic
// Subdatabase name
static const std::vector<std::byte> SUBDATABASE_NAME = {std::byte{'m'}, std::byte{'a'}, std::byte{'i'}, std::byte{'n'}};
enum class PageType : uint8_t
{
/*
* BDB has several page types, most of which we do not use
* They are listed here for completeness, but commented out
* to avoid opening something unintended.
INVALID = 0, // Invalid page type
DUPLICATE = 1, // Duplicate. Deprecated and no longer used
HASH_UNSORTED = 2, // Hash pages. Deprecated.
RECNO_INTERNAL = 4, // Recno internal
RECNO_LEAF = 6, // Recno leaf
HASH_META = 8, // Hash metadata
QUEUE_META = 10, // Queue Metadata
QUEUE_DATA = 11, // Queue Data
DUPLICATE_LEAF = 12, // Off-page duplicate leaf
HASH_SORTED = 13, // Sorted hash page
*/
BTREE_INTERNAL = 3, // BTree internal
BTREE_LEAF = 5, // BTree leaf
OVERFLOW_DATA = 7, // Overflow
BTREE_META = 9, // BTree metadata
};
enum class RecordType : uint8_t
{
KEYDATA = 1,
// DUPLICATE = 2, Unused as our databases do not support duplicate records
OVERFLOW_DATA = 3,
DELETE = 0x80, // Indicate this record is deleted. This is OR'd with the real type.
};
enum class BTreeFlags : uint32_t
{
/*
* BTree databases have feature flags, but we do not use them except for
* subdatabases. The unused flags are included for completeness, but commented out
* to avoid accidental use.
DUP = 1, // Duplicates
RECNO = 2, // Recno tree
RECNUM = 4, // BTree: Maintain record counts
FIXEDLEN = 8, // Recno: fixed length records
RENUMBER = 0x10, // Recno: renumber on insert/delete
DUPSORT = 0x40, // Duplicates are sorted
COMPRESS = 0x80, // Compressed
*/
SUBDB = 0x20, // Subdatabases
};
/** Berkeley DB BTree metadata page layout */
/** Berkeley DB BTree metadata page layout */
class MetaPage
{
public:
    uint32_t lsn_file;     // Log Sequence Number file
    uint32_t lsn_offset;   // Log Sequence Number offset
    uint32_t page_num;     // Current page number
    uint32_t magic;        // Magic number
    uint32_t version;      // Version
    uint32_t pagesize;     // Page size
    uint8_t encrypt_algo;  // Encryption algorithm
    PageType type;         // Page type
    uint8_t metaflags;     // Meta-only flags
    uint8_t unused1;       // Unused
    uint32_t free_list;    // Free list page number
    uint32_t last_page;    // Page number of last page in db
    uint32_t partitions;   // Number of partitions
    uint32_t key_count;    // Cached key count
    uint32_t record_count; // Cached record count
    BTreeFlags flags;      // Flags
    std::array<std::byte, 20> uid; // 20 byte unique file ID
    uint32_t unused2;      // Unused
    uint32_t minkey;       // Minimum key
    uint32_t re_len;       // Recno: fixed length record length
    uint32_t re_pad;       // Recno: fixed length record pad
    uint32_t root;         // Root page number
    char unused3[368];     // 92 * 4 bytes of unused space
    uint32_t crypto_magic; // Crypto magic number
    char trash[12];        // 3 * 4 bytes of trash space
    unsigned char iv[20];  // Crypto IV
    unsigned char chksum[16]; // Checksum

    bool other_endian;          // True when the file's byte order differs from this machine's
    uint32_t expected_page_num; // Page number this metapage must claim to live at

    MetaPage(uint32_t expected_page_num) : expected_page_num(expected_page_num) {}
    MetaPage() = delete;

    /** Deserialize the metapage fields in file order. Endianness of the file
     *  is detected from the magic number once it has been read; all
     *  multi-byte fields are byteswapped afterwards if the file is
     *  other-endian. Throws std::runtime_error if the page number, magic,
     *  version, page size, page type, or flags fail validation. */
    template<typename Stream>
    void Unserialize(Stream& s)
    {
        s >> lsn_file;
        s >> lsn_offset;
        s >> page_num;
        s >> magic;
        s >> version;
        s >> pagesize;
        s >> encrypt_algo;

        // The magic is a known constant, so a reversed magic tells us the
        // file was written with the opposite endianness.
        other_endian = magic == BTREE_MAGIC_OE;

        uint8_t uint8_type;
        s >> uint8_type;
        type = static_cast<PageType>(uint8_type);

        s >> metaflags;
        s >> unused1;
        s >> free_list;
        s >> last_page;
        s >> partitions;
        s >> key_count;
        s >> record_count;

        uint32_t uint32_flags;
        s >> uint32_flags;
        // Flags must be byteswapped before the enum cast so the SUBDB check below works.
        if (other_endian) {
            uint32_flags = internal_bswap_32(uint32_flags);
        }
        flags = static_cast<BTreeFlags>(uint32_flags);

        s >> uid;
        s >> unused2;
        s >> minkey;
        s >> re_len;
        s >> re_pad;
        s >> root;
        s >> unused3;
        s >> crypto_magic;
        s >> trash;
        s >> iv;
        s >> chksum;

        if (other_endian) {
            lsn_file = internal_bswap_32(lsn_file);
            lsn_offset = internal_bswap_32(lsn_offset);
            page_num = internal_bswap_32(page_num);
            magic = internal_bswap_32(magic);
            version = internal_bswap_32(version);
            pagesize = internal_bswap_32(pagesize);
            free_list = internal_bswap_32(free_list);
            last_page = internal_bswap_32(last_page);
            partitions = internal_bswap_32(partitions);
            key_count = internal_bswap_32(key_count);
            record_count = internal_bswap_32(record_count);
            unused2 = internal_bswap_32(unused2);
            minkey = internal_bswap_32(minkey);
            re_len = internal_bswap_32(re_len);
            re_pad = internal_bswap_32(re_pad);
            root = internal_bswap_32(root);
            crypto_magic = internal_bswap_32(crypto_magic);
        }

        // Page number must match
        if (page_num != expected_page_num) {
            throw std::runtime_error("Meta page number mismatch");
        }
        // Check magic
        if (magic != BTREE_MAGIC) {
            throw std::runtime_error("Not a BDB file");
        }
        // Only version 9 is supported
        if (version != 9) {
            throw std::runtime_error("Unsupported BDB data file version number");
        }
        // Page size must be 512 <= pagesize <= 64k, and be a power of 2
        if (pagesize < 512 || pagesize > 65536 || (pagesize & (pagesize - 1)) != 0) {
            throw std::runtime_error("Bad page size");
        }
        // Page type must be the btree type
        if (type != PageType::BTREE_META) {
            throw std::runtime_error("Unexpected page type, should be 9 (BTree Metadata)");
        }
        // Only supported meta-flag is subdatabase
        if (flags != BTreeFlags::SUBDB) {
            throw std::runtime_error("Unexpected database flags, should only be 0x20 (subdatabases)");
        }
    }
};
/** General class for records in a BDB BTree database. Contains common fields. */
/** General class for records in a BDB BTree database. Contains common fields. */
class RecordHeader
{
public:
    uint16_t len;    // Key/data item length
    RecordType type; // Page type (BDB has this include a DELETE FLAG that we track separately)
    bool deleted;    // Whether the DELETE flag was set on type
    static constexpr size_t SIZE = 3; // The record header is 3 bytes

    bool other_endian; // True when the file's byte order differs from this machine's

    RecordHeader(bool other_endian) : other_endian(other_endian) {}
    RecordHeader() = delete;

    /** Read the 3-byte record header: a 2-byte payload length followed by a
     *  1-byte type. The DELETE bit is masked out of the stored type and
     *  recorded separately in `deleted`. */
    template<typename Stream>
    void Unserialize(Stream& s)
    {
        s >> len;
        uint8_t uint8_type;
        s >> uint8_type;
        type = static_cast<RecordType>(uint8_type & ~static_cast<uint8_t>(RecordType::DELETE));
        deleted = uint8_type & static_cast<uint8_t>(RecordType::DELETE);
        if (other_endian) {
            len = internal_bswap_16(len);
        }
    }
};
/** Class for data in the record directly */
class DataRecord
{
public:
DataRecord(const RecordHeader& header) : m_header(header) {}
DataRecord() = delete;
RecordHeader m_header;
std::vector<std::byte> data; // Variable length key/data item
template<typename Stream>
void Unserialize(Stream& s)
{
data.resize(m_header.len);
s.read(AsWritableBytes(Span(data.data(), data.size())));
}
};
/** Class for records representing internal nodes of the BTree. */
class InternalRecord
{
public:
InternalRecord(const RecordHeader& header) : m_header(header) {}
InternalRecord() = delete;
RecordHeader m_header;
uint8_t unused; // Padding, unused
uint32_t page_num; // Page number of referenced page
uint32_t records; // Subtree record count
std::vector<std::byte> data; // Variable length key item
static constexpr size_t FIXED_SIZE = 9; // Size of fixed data is 9 bytes
template<typename Stream>
void Unserialize(Stream& s)
{
s >> unused;
s >> page_num;
s >> records;
data.resize(m_header.len);
s.read(AsWritableBytes(Span(data.data(), data.size())));
if (m_header.other_endian) {
page_num = internal_bswap_32(page_num);
records = internal_bswap_32(records);
}
}
};
/** Class for records representing overflow records of the BTree.
* Overflow records point to a page which contains the data in the record.
* Those pages may point to further pages with the rest of the data if it does not fit
* in one page */
/** Class for records representing overflow records of the BTree.
 * Overflow records point to a page which contains the data in the record.
 * Those pages may point to further pages with the rest of the data if it does not fit
 * in one page */
class OverflowRecord
{
public:
    OverflowRecord(const RecordHeader& header) : m_header(header) {}
    OverflowRecord() = delete;

    RecordHeader m_header;
    uint8_t unused2;      // Padding, unused
    uint32_t page_number; // Page number where data begins
    uint32_t item_len;    // Total length of item
    static constexpr size_t SIZE = 9; // Overflow record is always 9 bytes

    /** Read the fixed 9-byte overflow reference and byteswap the integer
     *  fields if the file is other-endian. The actual data lives on the
     *  referenced overflow page(s), not in this record. */
    template<typename Stream>
    void Unserialize(Stream& s)
    {
        s >> unused2;
        s >> page_number;
        s >> item_len;
        if (m_header.other_endian) {
            page_number = internal_bswap_32(page_number);
            item_len = internal_bswap_32(item_len);
        }
    }
};
/** A generic data page in the database. Contains fields common to all data pages. */
class PageHeader
{
public:
uint32_t lsn_file; // Log Sequence Number file
uint32_t lsn_offset;// Log Sequence Number offset
uint32_t page_num; // Current page number
uint32_t prev_page; // Previous page number
uint32_t next_page; // Next page number
uint16_t entries; // Number of items on the page
uint16_t hf_offset; // High free byte page offset
uint8_t level; // Btree page level
PageType type; // Page type
static constexpr int64_t SIZE = 26; // The header is 26 bytes
uint32_t expected_page_num;
bool other_endian;
PageHeader(uint32_t page_num, bool other_endian) : expected_page_num(page_num), other_endian(other_endian) {}
PageHeader() = delete;
template<typename Stream>
void Unserialize(Stream& s)
{
s >> lsn_file;
s >> lsn_offset;
s >> page_num;
s >> prev_page;
s >> next_page;
s >> entries;
s >> hf_offset;
s >> level;
uint8_t uint8_type;
s >> uint8_type;
type = static_cast<PageType>(uint8_type);
if (other_endian) {
lsn_file = internal_bswap_32(lsn_file);
lsn_offset = internal_bswap_32(lsn_offset);
page_num = internal_bswap_32(page_num);
prev_page = internal_bswap_32(prev_page);
next_page = internal_bswap_32(next_page);
entries = internal_bswap_16(entries);
hf_offset = internal_bswap_16(hf_offset);
}
if (expected_page_num != page_num) {
throw std::runtime_error("Page number mismatch");
}
if ((type != PageType::OVERFLOW_DATA && level < 1) || (type == PageType::OVERFLOW_DATA && level != 0)) {
throw std::runtime_error("Bad btree level");
}
}
};
/** A page of records in the database */
class RecordsPage
{
public:
RecordsPage(const PageHeader& header) : m_header(header) {}
RecordsPage() = delete;
PageHeader m_header;
std::vector<uint16_t> indexes;
std::vector<std::variant<DataRecord, OverflowRecord>> records;
template<typename Stream>
void Unserialize(Stream& s)
{
// Current position within the page
int64_t pos = PageHeader::SIZE;
// Get the items
for (uint32_t i = 0; i < m_header.entries; ++i) {
// Get the index
uint16_t index;
s >> index;
if (m_header.other_endian) {
index = internal_bswap_16(index);
}
indexes.push_back(index);
pos += sizeof(uint16_t);
// Go to the offset from the index
int64_t to_jump = index - pos;
if (to_jump < 0) {
throw std::runtime_error("Data record position not in page");
}
s.ignore(to_jump);
// Read the record
RecordHeader rec_hdr(m_header.other_endian);
s >> rec_hdr;
to_jump += RecordHeader::SIZE;
switch (rec_hdr.type) {
case RecordType::KEYDATA:
{
DataRecord record(rec_hdr);
s >> record;
records.emplace_back(record);
to_jump += rec_hdr.len;
break;
}
case RecordType::OVERFLOW_DATA:
{
OverflowRecord record(rec_hdr);
s >> record;
records.emplace_back(record);
to_jump += OverflowRecord::SIZE;
break;
}
default:
throw std::runtime_error("Unknown record type in records page");
}
// Go back to the indexes
s.seek(-to_jump, SEEK_CUR);
}
}
};
/** A page containing overflow data */
class OverflowPage
{
public:
OverflowPage(const PageHeader& header) : m_header(header) {}
OverflowPage() = delete;
PageHeader m_header;
// BDB overloads some page fields to store overflow page data
// hf_offset contains the length of the overflow data stored on this page
// entries contains a reference count for references to this item
// The overflow data itself. Begins immediately following header
std::vector<std::byte> data;
template<typename Stream>
void Unserialize(Stream& s)
{
data.resize(m_header.hf_offset);
s.read(AsWritableBytes(Span(data.data(), data.size())));
}
};
/** A page of records in the database */
class InternalPage
{
public:
InternalPage(const PageHeader& header) : m_header(header) {}
InternalPage() = delete;
PageHeader m_header;
std::vector<uint16_t> indexes;
std::vector<InternalRecord> records;
template<typename Stream>
void Unserialize(Stream& s)
{
// Current position within the page
int64_t pos = PageHeader::SIZE;
// Get the items
for (uint32_t i = 0; i < m_header.entries; ++i) {
// Get the index
uint16_t index;
s >> index;
if (m_header.other_endian) {
index = internal_bswap_16(index);
}
indexes.push_back(index);
pos += sizeof(uint16_t);
// Go to the offset from the index
int64_t to_jump = index - pos;
if (to_jump < 0) {
throw std::runtime_error("Internal record position not in page");
}
s.ignore(to_jump);
// Read the record
RecordHeader rec_hdr(m_header.other_endian);
s >> rec_hdr;
to_jump += RecordHeader::SIZE;
if (rec_hdr.type != RecordType::KEYDATA) {
throw std::runtime_error("Unknown record type in internal page");
}
InternalRecord record(rec_hdr);
s >> record;
records.emplace_back(record);
to_jump += InternalRecord::FIXED_SIZE + rec_hdr.len;
// Go back to the indexes
s.seek(-to_jump, SEEK_CUR);
}
}
};
// Position the file at the first byte of the given page.
static void SeekToPage(AutoFile& s, uint32_t page_num, uint32_t page_size)
{
    // Widen to 64 bits before multiplying so large page numbers cannot overflow.
    const int64_t byte_offset = int64_t{page_num} * page_size;
    s.seek(byte_offset, SEEK_SET);
}
/** Parse the BDB file at m_filepath and load every key/value record into
 *  m_records. Validates the outer metapage, confirms all LSNs are reset,
 *  locates the "main" subdatabase, then walks its BTree depth-first,
 *  reassembling overflow chains as needed.
 *  Throws std::runtime_error on any malformed or unsupported structure. */
void BerkeleyRODatabase::Open()
{
    // Open the file
    FILE* file = fsbridge::fopen(m_filepath, "rb");
    AutoFile db_file(file);
    if (db_file.IsNull()) {
        throw std::runtime_error("BerkeleyRODatabase: Failed to open database file");
    }

    uint32_t page_size = 4096; // Default page size

    // Read the outer metapage
    // Expected page number is 0
    MetaPage outer_meta(0);
    db_file >> outer_meta;
    page_size = outer_meta.pagesize;

    // Verify the size of the file is a multiple of the page size
    db_file.seek(0, SEEK_END);
    int64_t size = db_file.tell();
    /* BDB doesn't actually error on this
    if (size % page_size != 0) {
        throw std::runtime_error("File size is not a multiple of page size");
    }
    */

    // Check the last page number
    uint32_t expected_last_page = (size / page_size) - 1;
    if (outer_meta.last_page != expected_last_page) {
        throw std::runtime_error("Last page number could not fit in file");
    }

    // Make sure encryption is disabled
    if (outer_meta.encrypt_algo != 0) {
        throw std::runtime_error("BDB builtin encryption is not supported");
    }

    // Check all LSNs point to file 0 and offset 1 which indicates that the LSNs were
    // reset and that the log files are not necessary to get all of the data in the database.
    for (uint32_t i = 0; i < outer_meta.last_page; ++i) {
        // The LSN is composed of 2 32-bit ints, the first is a file id, the second an offset
        // It will always be the first 8 bytes of a page, so we deserialize it directly for every page
        uint32_t file;
        uint32_t offset;
        SeekToPage(db_file, i, page_size);
        db_file >> file >> offset;
        if (outer_meta.other_endian) {
            file = internal_bswap_32(file);
            offset = internal_bswap_32(offset);
        }
        if (file != 0 || offset != 1) {
            throw std::runtime_error("LSNs are not reset, this database is not completely flushed. Please reopen then close the database with a version that has BDB support");
        }
    }

    // Read the root page
    SeekToPage(db_file, outer_meta.root, page_size);
    PageHeader header(outer_meta.root, outer_meta.other_endian);
    db_file >> header;
    if (header.type != PageType::BTREE_LEAF) {
        throw std::runtime_error("Unexpected outer database root page type");
    }
    if (header.entries != 2) {
        throw std::runtime_error("Unexpected number of entries in outer database root page");
    }
    RecordsPage page(header);
    db_file >> page;

    // First record should be the string "main"
    if (!std::holds_alternative<DataRecord>(page.records.at(0)) || std::get<DataRecord>(page.records.at(0)).data != SUBDATABASE_NAME) {
        throw std::runtime_error("Subdatabase has an unexpected name");
    }
    // Check length of page number for subdatabase location
    if (!std::holds_alternative<DataRecord>(page.records.at(1)) || std::get<DataRecord>(page.records.at(1)).m_header.len != 4) {
        throw std::runtime_error("Subdatabase page number has unexpected length");
    }

    // Read subdatabase page number
    // It is written as a big endian 32 bit number
    uint32_t main_db_page = ReadBE32(UCharCast(std::get<DataRecord>(page.records.at(1)).data.data()));
    // The main database is in a page that doesn't exist
    if (main_db_page > outer_meta.last_page) {
        throw std::runtime_error("Page number is greater than database last page");
    }

    // Read the inner metapage
    SeekToPage(db_file, main_db_page, page_size);
    MetaPage inner_meta(main_db_page);
    db_file >> inner_meta;
    if (inner_meta.pagesize != page_size) {
        throw std::runtime_error("Unexpected page size");
    }
    if (inner_meta.last_page > outer_meta.last_page) {
        throw std::runtime_error("Subdatabase last page is greater than database last page");
    }
    // Make sure encryption is disabled
    if (inner_meta.encrypt_algo != 0) {
        throw std::runtime_error("BDB builtin encryption is not supported");
    }

    // Do a DFS through the BTree, starting at root
    std::vector<uint32_t> pages{inner_meta.root};
    while (pages.size() > 0) {
        uint32_t curr_page = pages.back();
        // Bug fix: the original constructed this runtime_error but never
        // threw it, silently ignoring out-of-range page pointers.
        if (curr_page > inner_meta.last_page) {
            throw std::runtime_error("Page number is greater than subdatabase last page");
        }
        pages.pop_back();
        SeekToPage(db_file, curr_page, page_size);
        // Named page_header (not header) to avoid shadowing the root page header above.
        PageHeader page_header(curr_page, inner_meta.other_endian);
        db_file >> page_header;
        switch (page_header.type) {
        case PageType::BTREE_INTERNAL:
        {
            InternalPage int_page(page_header);
            db_file >> int_page;
            // Queue every live child page for the DFS.
            for (const InternalRecord& rec : int_page.records) {
                if (rec.m_header.deleted) continue;
                pages.push_back(rec.page_num);
            }
            break;
        }
        case PageType::BTREE_LEAF:
        {
            RecordsPage rec_page(page_header);
            db_file >> rec_page;
            if (rec_page.records.size() % 2 != 0) {
                // BDB stores key value pairs in consecutive records, thus an odd number of records is unexpected
                throw std::runtime_error("Records page has odd number of records");
            }
            bool is_key = true;
            std::vector<std::byte> key;
            for (const std::variant<DataRecord, OverflowRecord>& rec : rec_page.records) {
                std::vector<std::byte> data;
                if (const DataRecord* drec = std::get_if<DataRecord>(&rec)) {
                    if (drec->m_header.deleted) continue;
                    data = drec->data;
                } else if (const OverflowRecord* orec = std::get_if<OverflowRecord>(&rec)) {
                    if (orec->m_header.deleted) continue;
                    // Follow the chain of overflow pages, concatenating each
                    // page's payload until next_page is 0.
                    uint32_t next_page = orec->page_number;
                    while (next_page != 0) {
                        SeekToPage(db_file, next_page, page_size);
                        PageHeader opage_header(next_page, inner_meta.other_endian);
                        db_file >> opage_header;
                        if (opage_header.type != PageType::OVERFLOW_DATA) {
                            throw std::runtime_error("Bad overflow record page type");
                        }
                        OverflowPage opage(opage_header);
                        db_file >> opage;
                        data.insert(data.end(), opage.data.begin(), opage.data.end());
                        next_page = opage_header.next_page;
                    }
                }
                // Records alternate key, value, key, value, ...
                if (is_key) {
                    key = data;
                } else {
                    m_records.emplace(SerializeData{key.begin(), key.end()}, SerializeData{data.begin(), data.end()});
                    key.clear();
                }
                is_key = !is_key;
            }
            break;
        }
        default:
            throw std::runtime_error("Unexpected page type");
        }
    }
}
/** Make a DatabaseBatch connected to this database.
 *  flush_on_close is irrelevant for a read-only database and is ignored. */
std::unique_ptr<DatabaseBatch> BerkeleyRODatabase::MakeBatch(bool flush_on_close)
{
    auto batch = std::make_unique<BerkeleyROBatch>(*this);
    return batch;
}
bool BerkeleyRODatabase::Backup(const std::string& dest) const
{
fs::path src(m_filepath);
fs::path dst(fs::PathFromString(dest));
if (fs::is_directory(dst)) {
dst = BDBDataFile(dst);
}
try {
if (fs::exists(dst) && fs::equivalent(src, dst)) {
LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(dst));
return false;
}
fs::copy_file(src, dst, fs::copy_options::overwrite_existing);
LogPrintf("copied %s to %s\n", fs::PathToString(m_filepath), fs::PathToString(dst));
return true;
} catch (const fs::filesystem_error& e) {
LogPrintf("error copying %s to %s - %s\n", fs::PathToString(m_filepath), fs::PathToString(dst), fsbridge::get_filesystem_error_message(e));
return false;
}
}
/** Look up `key` in the in-memory record map and write its value into
 *  `value`. Returns false when the key is not present. */
bool BerkeleyROBatch::ReadKey(DataStream&& key, DataStream& value)
{
    SerializeData key_data{key.begin(), key.end()};
    const auto it{m_database.m_records.find(key_data)};
    if (it == m_database.m_records.end()) {
        return false;
    }
    value.clear();
    // Write directly from the stored record; the original copied the whole
    // value into a local (`auto val = it->second;`) for no benefit.
    value.write(Span(it->second));
    return true;
}
/** Return whether `key` exists in the in-memory record map. */
bool BerkeleyROBatch::HasKey(DataStream&& key)
{
    const SerializeData needle{key.begin(), key.end()};
    return m_database.m_records.find(needle) != m_database.m_records.end();
}
/** Construct a cursor over the records whose keys begin with `prefix`
 *  (an empty prefix selects every record). */
BerkeleyROCursor::BerkeleyROCursor(const BerkeleyRODatabase& database, Span<const std::byte> prefix)
    : m_database(database)
{
    // The map's transparent BytePrefix comparison yields the contiguous
    // range of keys sharing the prefix.
    auto range = m_database.m_records.equal_range(BytePrefix{prefix});
    m_cursor = range.first;
    m_cursor_end = range.second;
}
/** Emit the next key/value pair into the output streams.
 *  Returns DONE once the range is exhausted, MORE otherwise. */
DatabaseCursor::Status BerkeleyROCursor::Next(DataStream& ssKey, DataStream& ssValue)
{
    if (m_cursor == m_cursor_end) {
        return DatabaseCursor::Status::DONE;
    }
    const auto& [rec_key, rec_value] = *m_cursor;
    ssKey.write(Span(rec_key));
    ssValue.write(Span(rec_value));
    ++m_cursor;
    return DatabaseCursor::Status::MORE;
}
/** Return a cursor limited to records whose keys begin with `prefix`. */
std::unique_ptr<DatabaseCursor> BerkeleyROBatch::GetNewPrefixCursor(Span<const std::byte> prefix)
{
    auto cursor = std::make_unique<BerkeleyROCursor>(m_database, prefix);
    return cursor;
}
/** Open the BDB data file under `path` read-only and return a handle to it.
 *  On parse failure, sets `status`/`error` and returns nullptr. */
std::unique_ptr<BerkeleyRODatabase> MakeBerkeleyRODatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error)
{
    const fs::path data_file{BDBDataFile(path)};
    try {
        auto db{std::make_unique<BerkeleyRODatabase>(data_file)};
        status = DatabaseStatus::SUCCESS;
        return db;
    } catch (const std::runtime_error& e) {
        // BerkeleyRODatabase::Open reports malformed files via runtime_error.
        error.original = e.what();
        status = DatabaseStatus::FAILED_LOAD;
        return nullptr;
    }
}
} // namespace wallet

122
src/wallet/migrate.h Normal file
View File

@ -0,0 +1,122 @@
// Copyright (c) 2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_WALLET_MIGRATE_H
#define BITCOIN_WALLET_MIGRATE_H
#include <wallet/db.h>
#include <optional>
namespace wallet {
using BerkeleyROData = std::map<SerializeData, SerializeData, std::less<>>;
/**
* A class representing a BerkeleyDB file from which we can only read records.
* This is used only for migration of legacy to descriptor wallets
*/
class BerkeleyRODatabase : public WalletDatabase
{
private:
    const fs::path m_filepath; // Path to the BDB data file

public:
    /** Create DB handle. When `open` is true (the default), the file is
     *  parsed immediately and Open() may throw std::runtime_error. */
    BerkeleyRODatabase(const fs::path& filepath, bool open = true) : WalletDatabase(), m_filepath(filepath)
    {
        if (open) Open();
    }
    ~BerkeleyRODatabase() {};

    // All key/value records parsed from the file, held in memory
    BerkeleyROData m_records;

    /** Parse the database file and load its records into m_records. */
    void Open() override;

    /** Indicate that a new database user has begun using the database.
     *  No-op for this read-only database. */
    void AddRef() override {}
    /** Indicate that a database user has stopped using the database.
     *  No-op for this read-only database. */
    void RemoveRef() override {}

    /** Rewrite the entire database on disk. Unsupported here; always
     *  returns false. */
    bool Rewrite(const char* pszSkip=nullptr) override { return false; }

    /** Back up the entire database to a file.
     */
    bool Backup(const std::string& strDest) const override;

    /** Make sure all changes are flushed to database file.
     *  No-op: there are never changes to flush. */
    void Flush() override {}
    /** Flush to the database file and close the database.
     *  No-op: nothing is held open after parsing. */
    void Close() override {}
    /* Flush the wallet passively. Unsupported; always returns false. */
    bool PeriodicFlush() override { return false; }

    void IncrementUpdateCounter() override {}
    void ReloadDbEnv() override {}

    /** Return path to main database file for logs and error messages. */
    std::string Filename() override { return fs::PathToString(m_filepath); }

    std::string Format() override { return "bdb_ro"; }

    /** Make a DatabaseBatch connected to this database */
    std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override;
};
/** Cursor over the in-memory records of a BerkeleyRODatabase, optionally
 *  restricted to keys sharing a byte prefix. */
class BerkeleyROCursor : public DatabaseCursor
{
private:
    const BerkeleyRODatabase& m_database;        // Database being iterated; must outlive the cursor
    BerkeleyROData::const_iterator m_cursor;     // Next record to emit
    BerkeleyROData::const_iterator m_cursor_end; // Past-the-end of the iteration range

public:
    explicit BerkeleyROCursor(const BerkeleyRODatabase& database, Span<const std::byte> prefix = {});
    ~BerkeleyROCursor() {}

    Status Next(DataStream& key, DataStream& value) override;
};
/** RAII class that provides access to a BerkeleyRODatabase */
/** RAII class that provides access to a BerkeleyRODatabase */
class BerkeleyROBatch : public DatabaseBatch
{
private:
    const BerkeleyRODatabase& m_database; // Underlying read-only database; must outlive the batch

    bool ReadKey(DataStream&& key, DataStream& value) override;
    // WriteKey returns true without writing anything (read-only database)
    bool WriteKey(DataStream&& key, DataStream&& value, bool overwrite=true) override { return true; }
    // Erasing is unsupported; always reports failure
    bool EraseKey(DataStream&& key) override { return false; }
    bool HasKey(DataStream&& key) override;
    bool ErasePrefix(Span<const std::byte> prefix) override { return false; }

public:
    explicit BerkeleyROBatch(const BerkeleyRODatabase& database) : m_database(database) {}
    ~BerkeleyROBatch() {}

    // Batches hold a reference to the database and must not be copied
    BerkeleyROBatch(const BerkeleyROBatch&) = delete;
    BerkeleyROBatch& operator=(const BerkeleyROBatch&) = delete;

    void Flush() override {} // No-op: nothing to flush
    void Close() override {} // No-op: nothing held open
    std::unique_ptr<DatabaseCursor> GetNewCursor() override { return std::make_unique<BerkeleyROCursor>(m_database); }
    std::unique_ptr<DatabaseCursor> GetNewPrefixCursor(Span<const std::byte> prefix) override;
    // Transactions are not supported
    bool TxnBegin() override { return false; }
    bool TxnCommit() override { return false; }
    bool TxnAbort() override { return false; }
};
//! Return object giving access to Berkeley Read Only database at specified path.
std::unique_ptr<BerkeleyRODatabase> MakeBerkeleyRODatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
} // namespace wallet
#endif // BITCOIN_WALLET_MIGRATE_H

View File

@ -80,7 +80,7 @@ bool PermitsUncompressed(IsMineSigVersion sigversion)
return sigversion == IsMineSigVersion::TOP || sigversion == IsMineSigVersion::P2SH;
}
bool HaveKeys(const std::vector<valtype>& pubkeys, const LegacyScriptPubKeyMan& keystore)
bool HaveKeys(const std::vector<valtype>& pubkeys, const LegacyDataSPKM& keystore)
{
for (const valtype& pubkey : pubkeys) {
CKeyID keyID = CPubKey(pubkey).GetID();
@ -227,7 +227,7 @@ isminetype LegacyScriptPubKeyMan::IsMine(const CScript& script) const
assert(false);
}
bool LegacyScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master_key)
bool LegacyDataSPKM::CheckDecryptionKey(const CKeyingMaterial& master_key)
{
{
LOCK(cs_KeyStore);
@ -581,7 +581,7 @@ int64_t LegacyScriptPubKeyMan::GetTimeFirstKey() const
return nTimeFirstKey;
}
std::unique_ptr<SigningProvider> LegacyScriptPubKeyMan::GetSolvingProvider(const CScript& script) const
std::unique_ptr<SigningProvider> LegacyDataSPKM::GetSolvingProvider(const CScript& script) const
{
return std::make_unique<LegacySigningProvider>(*this);
}
@ -717,7 +717,7 @@ void LegacyScriptPubKeyMan::UpdateTimeFirstKey(int64_t nCreateTime)
NotifyFirstKeyTimeChanged(this, nTimeFirstKey);
}
bool LegacyScriptPubKeyMan::LoadKey(const CKey& key, const CPubKey &pubkey)
bool LegacyDataSPKM::LoadKey(const CKey& key, const CPubKey &pubkey)
{
return AddKeyPubKeyInner(key, pubkey);
}
@ -769,7 +769,7 @@ bool LegacyScriptPubKeyMan::AddKeyPubKeyWithDB(WalletBatch& batch, const CKey& s
return true;
}
bool LegacyScriptPubKeyMan::LoadCScript(const CScript& redeemScript)
bool LegacyDataSPKM::LoadCScript(const CScript& redeemScript)
{
/* A sanity check was added in pull #3843 to avoid adding redeemScripts
* that never can be redeemed. However, old wallets may still contain
@ -784,18 +784,39 @@ bool LegacyScriptPubKeyMan::LoadCScript(const CScript& redeemScript)
return FillableSigningProvider::AddCScript(redeemScript);
}
void LegacyDataSPKM::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata& meta)
{
LOCK(cs_KeyStore);
mapKeyMetadata[keyID] = meta;
}
void LegacyScriptPubKeyMan::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata& meta)
{
LOCK(cs_KeyStore);
LegacyDataSPKM::LoadKeyMetadata(keyID, meta);
UpdateTimeFirstKey(meta.nCreateTime);
mapKeyMetadata[keyID] = meta;
}
void LegacyDataSPKM::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata& meta)
{
LOCK(cs_KeyStore);
m_script_metadata[script_id] = meta;
}
void LegacyScriptPubKeyMan::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata& meta)
{
LOCK(cs_KeyStore);
LegacyDataSPKM::LoadScriptMetadata(script_id, meta);
UpdateTimeFirstKey(meta.nCreateTime);
m_script_metadata[script_id] = meta;
}
bool LegacyDataSPKM::AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey)
{
// This function should only be called during loading of a legacy to be migrated.
// As such, the wallet should not be encrypted if this is called.
LOCK(cs_KeyStore);
assert(!m_storage.HasEncryptionKeys());
return FillableSigningProvider::AddKeyPubKey(key, pubkey);
}
bool LegacyScriptPubKeyMan::AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey)
@ -823,7 +844,7 @@ bool LegacyScriptPubKeyMan::AddKeyPubKeyInner(const CKey& key, const CPubKey &pu
return true;
}
bool LegacyScriptPubKeyMan::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid)
bool LegacyDataSPKM::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid)
{
// Set fDecryptionThoroughlyChecked to false when the checksum is invalid
if (!checksum_valid) {
@ -833,7 +854,7 @@ bool LegacyScriptPubKeyMan::LoadCryptedKey(const CPubKey &vchPubKey, const std::
return AddCryptedKeyInner(vchPubKey, vchCryptedSecret);
}
bool LegacyScriptPubKeyMan::AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret)
bool LegacyDataSPKM::AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret)
{
LOCK(cs_KeyStore);
assert(mapKeys.empty());
@ -861,13 +882,13 @@ bool LegacyScriptPubKeyMan::AddCryptedKey(const CPubKey &vchPubKey,
}
}
bool LegacyScriptPubKeyMan::HaveWatchOnly(const CScript &dest) const
bool LegacyDataSPKM::HaveWatchOnly(const CScript &dest) const
{
LOCK(cs_KeyStore);
return setWatchOnly.count(dest) > 0;
}
bool LegacyScriptPubKeyMan::HaveWatchOnly() const
bool LegacyDataSPKM::HaveWatchOnly() const
{
LOCK(cs_KeyStore);
return (!setWatchOnly.empty());
@ -901,12 +922,12 @@ bool LegacyScriptPubKeyMan::RemoveWatchOnly(const CScript &dest)
return true;
}
bool LegacyScriptPubKeyMan::LoadWatchOnly(const CScript &dest)
bool LegacyDataSPKM::LoadWatchOnly(const CScript &dest)
{
return AddWatchOnlyInMem(dest);
}
bool LegacyScriptPubKeyMan::AddWatchOnlyInMem(const CScript &dest)
bool LegacyDataSPKM::AddWatchOnlyInMem(const CScript &dest)
{
LOCK(cs_KeyStore);
setWatchOnly.insert(dest);
@ -950,7 +971,7 @@ bool LegacyScriptPubKeyMan::AddWatchOnly(const CScript& dest, int64_t nCreateTim
return AddWatchOnly(dest);
}
void LegacyScriptPubKeyMan::LoadHDChain(const CHDChain& chain)
void LegacyDataSPKM::LoadHDChain(const CHDChain& chain)
{
LOCK(cs_KeyStore);
m_hd_chain = chain;
@ -971,14 +992,14 @@ void LegacyScriptPubKeyMan::AddHDChain(const CHDChain& chain)
m_hd_chain = chain;
}
void LegacyScriptPubKeyMan::AddInactiveHDChain(const CHDChain& chain)
void LegacyDataSPKM::AddInactiveHDChain(const CHDChain& chain)
{
LOCK(cs_KeyStore);
assert(!chain.seed_id.IsNull());
m_inactive_hd_chains[chain.seed_id] = chain;
}
bool LegacyScriptPubKeyMan::HaveKey(const CKeyID &address) const
bool LegacyDataSPKM::HaveKey(const CKeyID &address) const
{
LOCK(cs_KeyStore);
if (!m_storage.HasEncryptionKeys()) {
@ -987,7 +1008,7 @@ bool LegacyScriptPubKeyMan::HaveKey(const CKeyID &address) const
return mapCryptedKeys.count(address) > 0;
}
bool LegacyScriptPubKeyMan::GetKey(const CKeyID &address, CKey& keyOut) const
bool LegacyDataSPKM::GetKey(const CKeyID &address, CKey& keyOut) const
{
LOCK(cs_KeyStore);
if (!m_storage.HasEncryptionKeys()) {
@ -1006,7 +1027,7 @@ bool LegacyScriptPubKeyMan::GetKey(const CKeyID &address, CKey& keyOut) const
return false;
}
bool LegacyScriptPubKeyMan::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& info) const
bool LegacyDataSPKM::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& info) const
{
CKeyMetadata meta;
{
@ -1026,7 +1047,7 @@ bool LegacyScriptPubKeyMan::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& inf
return true;
}
bool LegacyScriptPubKeyMan::GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const
bool LegacyDataSPKM::GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const
{
LOCK(cs_KeyStore);
WatchKeyMap::const_iterator it = mapWatchKeys.find(address);
@ -1037,7 +1058,7 @@ bool LegacyScriptPubKeyMan::GetWatchPubKey(const CKeyID &address, CPubKey &pubke
return false;
}
bool LegacyScriptPubKeyMan::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const
bool LegacyDataSPKM::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const
{
LOCK(cs_KeyStore);
if (!m_storage.HasEncryptionKeys()) {
@ -1156,7 +1177,7 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata&
throw std::runtime_error(std::string(__func__) + ": writing HD chain model failed");
}
void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
void LegacyDataSPKM::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
{
LOCK(cs_KeyStore);
if (keypool.m_pre_split) {
@ -1677,7 +1698,7 @@ std::set<CKeyID> LegacyScriptPubKeyMan::GetKeys() const
return set_address;
}
std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPubKeys() const
std::unordered_set<CScript, SaltedSipHasher> LegacyDataSPKM::GetScriptPubKeys() const
{
LOCK(cs_KeyStore);
std::unordered_set<CScript, SaltedSipHasher> spks;
@ -1699,43 +1720,163 @@ std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPub
// For all keys, if they have segwit scripts, those scripts will end up in mapScripts
for (const auto& script_pair : mapScripts) {
const CScript& script = script_pair.second;
if (IsMine(script) == ISMINE_SPENDABLE) {
// Add ScriptHash for scripts that are not already P2SH
if (!script.IsPayToScriptHash()) {
std::vector<std::vector<unsigned char>> sols;
TxoutType type = Solver(script, sols);
switch (type) {
// We don't care about these types because they are not spendable
case TxoutType::NONSTANDARD:
case TxoutType::NULL_DATA:
case TxoutType::WITNESS_UNKNOWN:
case TxoutType::WITNESS_V1_TAPROOT:
case TxoutType::SCRIPTHASH:
// These are only spendable if the witness scripts is also spendable as a scriptPubKey
// We will check these later after spks has been updated with spks from scripts.
case TxoutType::WITNESS_V0_SCRIPTHASH:
break;
// These scriptPubKeys have already been handled by dealing with the keys
// However if these scripts are here, then the P2SH nested spk will be spendable if these are also scriptPubKeys
case TxoutType::PUBKEY:
case TxoutType::PUBKEYHASH:
if (spks.count(script) > 0) {
spks.insert(GetScriptForDestination(ScriptHash(script)));
}
// For segwit scripts, we only consider them spendable if we have the segwit spk
int wit_ver = -1;
std::vector<unsigned char> witprog;
if (script.IsWitnessProgram(wit_ver, witprog) && wit_ver == 0) {
break;
case TxoutType::WITNESS_V0_KEYHASH:
{
CKeyID key_id{uint160(sols[0])};
CPubKey pubkey;
if (GetPubKey(key_id, pubkey) && pubkey.IsCompressed() && HaveKey(key_id)) {
spks.insert(script);
spks.insert(GetScriptForDestination(ScriptHash(script)));
}
} else {
// Multisigs are special. They don't show up as ISMINE_SPENDABLE unless they are in a P2SH
// So check the P2SH of a multisig to see if we should insert it
std::vector<std::vector<unsigned char>> sols;
TxoutType type = Solver(script, sols);
if (type == TxoutType::MULTISIG) {
CScript ms_spk = GetScriptForDestination(ScriptHash(script));
if (IsMine(ms_spk) != ISMINE_NO) {
spks.insert(ms_spk);
break;
}
case TxoutType::MULTISIG:
{
// Multisigs are only spendable if we have all of their keys
std::vector<std::vector<unsigned char>> keys(sols.begin() + 1, sols.begin() + sols.size() - 1);
if (!HaveKeys(keys, *this)) {
break;
}
// Multisigs are always spendable inside of P2SH scripts
spks.insert(GetScriptForDestination(ScriptHash(script)));
// We need to have the P2WSH script for the P2WSH to be spendable.
// But the keys also must be compressed
CScript ms_wsh = GetScriptForDestination(WitnessV0ScriptHash(script));
if (HaveCScript(CScriptID(ms_wsh))) {
bool allowed = true;
for (const auto& key : keys) {
if (key.size() != 33) {
allowed = false;
break;
}
}
if (allowed) {
spks.insert(ms_wsh);
spks.insert(GetScriptForDestination(ScriptHash(ms_wsh)));
}
}
break;
}
}
}
// Iterate again for all the P2WSH scripts
for (const auto& script_pair : mapScripts) {
const CScript& script = script_pair.second;
std::vector<std::vector<unsigned char>> sols;
TxoutType type = Solver(script, sols);
if (type == TxoutType::WITNESS_V0_SCRIPTHASH) {
uint160 hash;
CRIPEMD160().Write(sols[0].data(), sols[0].size()).Finalize(hash.begin());
CScript witness_script;
int wit_ver = -1;
std::vector<unsigned char> wit_prog;
if (GetCScript(CScriptID(hash), witness_script) &&
!witness_script.IsPayToScriptHash() &&
!witness_script.IsWitnessProgram(wit_ver, wit_prog) &&
spks.count(witness_script) > 0) {
spks.insert(script);
spks.insert(GetScriptForDestination(ScriptHash(script)));
}
}
}
enum class ScriptContext {
TOP,
P2SH,
P2WSH,
};
std::function<bool(const CScript&, const ScriptContext)> is_valid_script = [&](const CScript& script, const ScriptContext ctx) -> bool {
std::vector<valtype> sols;
TxoutType spk_type = Solver(script, sols);
CKeyID keyID;
switch (spk_type) {
case TxoutType::NONSTANDARD:
case TxoutType::NULL_DATA:
case TxoutType::WITNESS_UNKNOWN:
case TxoutType::WITNESS_V1_TAPROOT:
return ctx == ScriptContext::TOP;
case TxoutType::PUBKEY:
if (ctx == ScriptContext::P2WSH && sols[0].size() != 33) return false;
return true;
case TxoutType::WITNESS_V0_KEYHASH:
return ctx != ScriptContext::P2WSH;
case TxoutType::PUBKEYHASH:
if (ctx == ScriptContext::P2WSH) {
CPubKey pubkey;
if (GetPubKey(CKeyID(uint160(sols[0])), pubkey) && !pubkey.IsCompressed()) {
return false;
}
}
return true;
case TxoutType::SCRIPTHASH:
{
if (ctx != ScriptContext::TOP) return false;
CScriptID script_id = CScriptID(uint160(sols[0]));
CScript subscript;
if (GetCScript(script_id, subscript)) {
return is_valid_script(subscript, ScriptContext::P2SH);
}
return true;
}
case TxoutType::WITNESS_V0_SCRIPTHASH:
{
if (ctx == ScriptContext::P2WSH) return false;
CScriptID script_id{RIPEMD160(sols[0])};
CScript subscript;
if (GetCScript(script_id, subscript)) {
return is_valid_script(subscript, ScriptContext::P2WSH);
}
return true;
}
case TxoutType::MULTISIG:
{
if (ctx == ScriptContext::P2WSH) {
std::vector<valtype> keys(sols.begin() + 1, sols.begin() + sols.size() - 1);
for (size_t i = 0; i < keys.size(); i++) {
if (keys[i].size() != 33) {
return false;
}
}
}
return true;
}
}
assert(false);
};
// All watchonly scripts are raw
for (const CScript& script : setWatchOnly) {
// As the legacy wallet allowed to import any script, we need to verify the validity here.
// LegacyScriptPubKeyMan::IsMine() return 'ISMINE_NO' for invalid or not watched scripts (IsMineResult::INVALID or IsMineResult::NO).
// e.g. a "sh(sh(pkh()))" which legacy wallets allowed to import!.
if (IsMine(script) != ISMINE_NO) spks.insert(script);
if (is_valid_script(script, ScriptContext::TOP)) spks.insert(script);
}
return spks;
}
std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetNotMineScriptPubKeys() const
std::unordered_set<CScript, SaltedSipHasher> LegacyDataSPKM::GetNotMineScriptPubKeys() const
{
LOCK(cs_KeyStore);
std::unordered_set<CScript, SaltedSipHasher> spks;
@ -1745,7 +1886,7 @@ std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetNotMineSc
return spks;
}
std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
std::optional<MigrationData> LegacyDataSPKM::MigrateToDescriptor()
{
LOCK(cs_KeyStore);
if (m_storage.IsLocked()) {
@ -1812,7 +1953,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
WalletDescriptor w_desc(std::move(desc), creation_time, 0, 0, 0);
// Make the DescriptorScriptPubKeyMan and get the scriptPubKeys
auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size));
auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, /*keypool_size=*/0));
desc_spk_man->AddDescriptorKey(key, key.GetPubKey());
desc_spk_man->TopUp();
auto desc_spks = desc_spk_man->GetScriptPubKeys();
@ -1821,7 +1962,6 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
for (const CScript& spk : desc_spks) {
size_t erased = spks.erase(spk);
assert(erased == 1);
assert(IsMine(spk) == ISMINE_SPENDABLE);
}
out.desc_spkms.push_back(std::move(desc_spk_man));
@ -1857,7 +1997,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
WalletDescriptor w_desc(std::move(desc), 0, 0, chain_counter, 0);
// Make the DescriptorScriptPubKeyMan and get the scriptPubKeys
auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size));
auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, /*keypool_size=*/0));
desc_spk_man->AddDescriptorKey(master_key.key, master_key.key.GetPubKey());
desc_spk_man->TopUp();
auto desc_spks = desc_spk_man->GetScriptPubKeys();
@ -1866,7 +2006,6 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
for (const CScript& spk : desc_spks) {
size_t erased = spks.erase(spk);
assert(erased == 1);
assert(IsMine(spk) == ISMINE_SPENDABLE);
}
out.desc_spkms.push_back(std::move(desc_spk_man));
@ -1919,7 +2058,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
} else {
// Make the DescriptorScriptPubKeyMan and get the scriptPubKeys
WalletDescriptor w_desc(std::move(desc), creation_time, 0, 0, 0);
auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, m_keypool_size));
auto desc_spk_man = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(m_storage, w_desc, /*keypool_size=*/0));
for (const auto& keyid : privkeyids) {
CKey key;
if (!GetKey(keyid, key)) {
@ -1938,7 +2077,6 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
for (const CScript& desc_spk : desc_spks) {
auto del_it = spks.find(desc_spk);
assert(del_it != spks.end());
assert(IsMine(desc_spk) != ISMINE_NO);
it = spks.erase(del_it);
}
}
@ -1973,8 +2111,6 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
// * The P2WSH script is in the wallet and it is being watched
std::vector<std::vector<unsigned char>> keys(sols.begin() + 1, sols.begin() + sols.size() - 1);
if (HaveWatchOnly(sh_spk) || HaveWatchOnly(script) || HaveKeys(keys, *this) || (HaveCScript(CScriptID(witprog)) && HaveWatchOnly(witprog))) {
// The above emulates IsMine for these 3 scriptPubKeys, so double check that by running IsMine
assert(IsMine(sh_spk) != ISMINE_NO || IsMine(witprog) != ISMINE_NO || IsMine(sh_wsh_spk) != ISMINE_NO);
continue;
}
assert(IsMine(sh_spk) == ISMINE_NO && IsMine(witprog) == ISMINE_NO && IsMine(sh_wsh_spk) == ISMINE_NO);
@ -1997,7 +2133,7 @@ std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
return out;
}
bool LegacyScriptPubKeyMan::DeleteRecords()
bool LegacyDataSPKM::DeleteRecords()
{
LOCK(cs_KeyStore);
WalletBatch batch(m_storage.GetDatabase());

View File

@ -276,31 +276,106 @@ static const std::unordered_set<OutputType> LEGACY_OUTPUT_TYPES {
class DescriptorScriptPubKeyMan;
class LegacyScriptPubKeyMan : public ScriptPubKeyMan, public FillableSigningProvider
class LegacyDataSPKM : public ScriptPubKeyMan, public FillableSigningProvider
{
private:
//! keeps track of whether Unlock has run a thorough check before
bool fDecryptionThoroughlyChecked = true;
protected:
using WatchOnlySet = std::set<CScript>;
using WatchKeyMap = std::map<CKeyID, CPubKey>;
WalletBatch *encrypted_batch GUARDED_BY(cs_KeyStore) = nullptr;
using CryptedKeyMap = std::map<CKeyID, std::pair<CPubKey, std::vector<unsigned char>>>;
CryptedKeyMap mapCryptedKeys GUARDED_BY(cs_KeyStore);
WatchOnlySet setWatchOnly GUARDED_BY(cs_KeyStore);
WatchKeyMap mapWatchKeys GUARDED_BY(cs_KeyStore);
/* the HD chain data model (external chain counters) */
CHDChain m_hd_chain;
std::unordered_map<CKeyID, CHDChain, SaltedSipHasher> m_inactive_hd_chains;
//! keeps track of whether Unlock has run a thorough check before
bool fDecryptionThoroughlyChecked = true;
bool AddWatchOnlyInMem(const CScript &dest);
virtual bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey);
bool AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
public:
using ScriptPubKeyMan::ScriptPubKeyMan;
// Map from Key ID to key metadata.
std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_KeyStore);
// Map from Script ID to key metadata (for watch-only keys).
std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_KeyStore);
// ScriptPubKeyMan overrides
bool CheckDecryptionKey(const CKeyingMaterial& master_key) override;
std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override;
std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const override;
uint256 GetID() const override { return uint256::ONE; }
// FillableSigningProvider overrides
bool HaveKey(const CKeyID &address) const override;
bool GetKey(const CKeyID &address, CKey& keyOut) const override;
bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override;
bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override;
std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_KeyStore);
std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_KeyStore);
std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_KeyStore);
int64_t m_max_keypool_index GUARDED_BY(cs_KeyStore) = 0;
std::map<CKeyID, int64_t> m_pool_key_to_index;
//! Load metadata (used by LoadWallet)
virtual void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata);
virtual void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata);
//! Adds a watch-only address to the store, without saving it to disk (used by LoadWallet)
bool LoadWatchOnly(const CScript &dest);
//! Returns whether the watch-only script is in the wallet
bool HaveWatchOnly(const CScript &dest) const;
//! Returns whether there are any watch-only things in the wallet
bool HaveWatchOnly() const;
//! Adds a key to the store, without saving it to disk (used by LoadWallet)
bool LoadKey(const CKey& key, const CPubKey &pubkey);
//! Adds an encrypted key to the store, without saving it to disk (used by LoadWallet)
bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid);
//! Adds a CScript to the store
bool LoadCScript(const CScript& redeemScript);
//! Load a HD chain model (used by LoadWallet)
void LoadHDChain(const CHDChain& chain);
void AddInactiveHDChain(const CHDChain& chain);
const CHDChain& GetHDChain() const { return m_hd_chain; }
//! Load a keypool entry
void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool);
//! Fetches a pubkey from mapWatchKeys if it exists there
bool GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const;
/**
* Retrieves scripts that were imported by bugs into the legacy spkm and are
* simply invalid, such as a sh(sh(pkh())) script, or not watched.
*/
std::unordered_set<CScript, SaltedSipHasher> GetNotMineScriptPubKeys() const;
/** Get the DescriptorScriptPubKeyMans (with private keys) that have the same scriptPubKeys as this LegacyScriptPubKeyMan.
* Does not modify this ScriptPubKeyMan. */
std::optional<MigrationData> MigrateToDescriptor();
/** Delete all the records of this LegacyScriptPubKeyMan from disk */
bool DeleteRecords();
};
class LegacyScriptPubKeyMan : public LegacyDataSPKM
{
private:
WalletBatch *encrypted_batch GUARDED_BY(cs_KeyStore) = nullptr;
// By default, do not scan any block until keys/scripts are generated/imported
int64_t nTimeFirstKey GUARDED_BY(cs_KeyStore) = UNKNOWN_TIME;
//! Number of pre-generated keys/scripts (part of the look-ahead process, used to detect payments)
int64_t m_keypool_size GUARDED_BY(cs_KeyStore){DEFAULT_KEYPOOL_SIZE};
bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey);
bool AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey) override;
/**
* Private version of AddWatchOnly method which does not accept a
@ -313,7 +388,6 @@ private:
*/
bool AddWatchOnly(const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
bool AddWatchOnlyInMem(const CScript &dest);
//! Adds a watch-only address to the store, and saves it to disk.
bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest, int64_t create_time) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
@ -328,18 +402,9 @@ private:
/** Add a KeyOriginInfo to the wallet */
bool AddKeyOriginWithDB(WalletBatch& batch, const CPubKey& pubkey, const KeyOriginInfo& info);
/* the HD chain data model (external chain counters) */
CHDChain m_hd_chain;
std::unordered_map<CKeyID, CHDChain, SaltedSipHasher> m_inactive_hd_chains;
/* HD derive new child key (on internal or external chain) */
void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, CHDChain& hd_chain, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_KeyStore);
std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_KeyStore);
std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_KeyStore);
int64_t m_max_keypool_index GUARDED_BY(cs_KeyStore) = 0;
std::map<CKeyID, int64_t> m_pool_key_to_index;
// Tracks keypool indexes to CKeyIDs of keys that have been taken out of the keypool but may be returned to it
std::map<int64_t, CKeyID> m_index_to_reserved_key;
@ -376,12 +441,11 @@ private:
bool TopUpChain(WalletBatch& batch, CHDChain& chain, unsigned int size);
public:
LegacyScriptPubKeyMan(WalletStorage& storage, int64_t keypool_size) : ScriptPubKeyMan(storage), m_keypool_size(keypool_size) {}
LegacyScriptPubKeyMan(WalletStorage& storage, int64_t keypool_size) : LegacyDataSPKM(storage), m_keypool_size(keypool_size) {}
util::Result<CTxDestination> GetNewDestination(const OutputType type) override;
isminetype IsMine(const CScript& script) const override;
bool CheckDecryptionKey(const CKeyingMaterial& master_key) override;
bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) override;
util::Result<CTxDestination> GetReservedDestination(const OutputType type, bool internal, int64_t& index, CKeyPool& keypool) override;
@ -415,8 +479,6 @@ public:
bool CanGetAddresses(bool internal = false) const override;
std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const override;
bool CanProvide(const CScript& script, SignatureData& sigdata) override;
bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, bilingual_str>& input_errors) const override;
@ -425,58 +487,27 @@ public:
uint256 GetID() const override;
// Map from Key ID to key metadata.
std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_KeyStore);
// Map from Script ID to key metadata (for watch-only keys).
std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_KeyStore);
//! Adds a key to the store, and saves it to disk.
bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
//! Adds a key to the store, without saving it to disk (used by LoadWallet)
bool LoadKey(const CKey& key, const CPubKey &pubkey);
//! Adds an encrypted key to the store, and saves it to disk.
bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
//! Adds an encrypted key to the store, without saving it to disk (used by LoadWallet)
bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret, bool checksum_valid);
void UpdateTimeFirstKey(int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
//! Adds a CScript to the store
bool LoadCScript(const CScript& redeemScript);
//! Load metadata (used by LoadWallet)
void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata);
void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata);
void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata) override;
void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata) override;
//! Generate a new key
CPubKey GenerateNewKey(WalletBatch& batch, CHDChain& hd_chain, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
/* Set the HD chain model (chain child index counters) and writes it to the database */
void AddHDChain(const CHDChain& chain);
//! Load a HD chain model (used by LoadWallet)
void LoadHDChain(const CHDChain& chain);
const CHDChain& GetHDChain() const { return m_hd_chain; }
void AddInactiveHDChain(const CHDChain& chain);
//! Adds a watch-only address to the store, without saving it to disk (used by LoadWallet)
bool LoadWatchOnly(const CScript &dest);
//! Returns whether the watch-only script is in the wallet
bool HaveWatchOnly(const CScript &dest) const;
//! Returns whether there are any watch-only things in the wallet
bool HaveWatchOnly() const;
//! Remove a watch only script from the keystore
bool RemoveWatchOnly(const CScript &dest);
bool AddWatchOnly(const CScript& dest, int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
//! Fetches a pubkey from mapWatchKeys if it exists there
bool GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const;
/* SigningProvider overrides */
bool HaveKey(const CKeyID &address) const override;
bool GetKey(const CKeyID &address, CKey& keyOut) const override;
bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override;
bool AddCScript(const CScript& redeemScript) override;
bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override;
//! Load a keypool entry
void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool);
bool NewKeyPool();
void MarkPreSplitKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
@ -525,28 +556,15 @@ public:
const std::map<CKeyID, int64_t>& GetAllReserveKeys() const { return m_pool_key_to_index; }
std::set<CKeyID> GetKeys() const override;
std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override;
/**
* Retrieves scripts that were imported by bugs into the legacy spkm and are
* simply invalid, such as a sh(sh(pkh())) script, or not watched.
*/
std::unordered_set<CScript, SaltedSipHasher> GetNotMineScriptPubKeys() const;
/** Get the DescriptorScriptPubKeyMans (with private keys) that have the same scriptPubKeys as this LegacyScriptPubKeyMan.
* Does not modify this ScriptPubKeyMan. */
std::optional<MigrationData> MigrateToDescriptor();
/** Delete all the records of this LegacyScriptPubKeyMan from disk */
bool DeleteRecords();
};
/** Wraps a LegacyScriptPubKeyMan so that it can be returned in a new unique_ptr. Does not provide privkeys */
class LegacySigningProvider : public SigningProvider
{
private:
const LegacyScriptPubKeyMan& m_spk_man;
const LegacyDataSPKM& m_spk_man;
public:
explicit LegacySigningProvider(const LegacyScriptPubKeyMan& spk_man) : m_spk_man(spk_man) {}
explicit LegacySigningProvider(const LegacyDataSPKM& spk_man) : m_spk_man(spk_man) {}
bool GetCScript(const CScriptID &scriptid, CScript& script) const override { return m_spk_man.GetCScript(scriptid, script); }
bool HaveCScript(const CScriptID &scriptid) const override { return m_spk_man.HaveCScript(scriptid); }

View File

@ -18,6 +18,7 @@
#ifdef USE_SQLITE
#include <wallet/sqlite.h>
#endif
#include <wallet/migrate.h>
#include <wallet/test/util.h>
#include <wallet/walletutil.h> // for WALLET_FLAG_DESCRIPTORS
@ -134,6 +135,8 @@ static std::vector<std::unique_ptr<WalletDatabase>> TestDatabases(const fs::path
bilingual_str error;
#ifdef USE_BDB
dbs.emplace_back(MakeBerkeleyDatabase(path_root / "bdb", options, status, error));
// Needs BDB to make the DB to read
dbs.emplace_back(std::make_unique<BerkeleyRODatabase>(BDBDataFile(path_root / "bdb"), /*open=*/false));
#endif
#ifdef USE_SQLITE
dbs.emplace_back(MakeSQLiteDatabase(path_root / "sqlite", options, status, error));
@ -148,11 +151,16 @@ BOOST_AUTO_TEST_CASE(db_cursor_prefix_range_test)
for (const auto& database : TestDatabases(m_path_root)) {
std::vector<std::string> prefixes = {"", "FIRST", "SECOND", "P\xfe\xff", "P\xff\x01", "\xff\xff"};
// Write elements to it
std::unique_ptr<DatabaseBatch> handler = Assert(database)->MakeBatch();
for (unsigned int i = 0; i < 10; i++) {
for (const auto& prefix : prefixes) {
BOOST_CHECK(handler->Write(std::make_pair(prefix, i), i));
if (dynamic_cast<BerkeleyRODatabase*>(database.get())) {
// For BerkeleyRO, open the file now. This must happen after BDB has written to the file
database->Open();
} else {
// Write elements to it if not berkeleyro
for (unsigned int i = 0; i < 10; i++) {
for (const auto& prefix : prefixes) {
BOOST_CHECK(handler->Write(std::make_pair(prefix, i), i));
}
}
}
@ -180,6 +188,8 @@ BOOST_AUTO_TEST_CASE(db_cursor_prefix_range_test)
// Let's now read it once more, it should return DONE
BOOST_CHECK(cursor->Next(key, value) == DatabaseCursor::Status::DONE);
}
handler.reset();
database->Close();
}
}
@ -199,13 +209,23 @@ BOOST_AUTO_TEST_CASE(db_cursor_prefix_byte_test)
ffs{StringData("\xff\xffsuffix"), StringData("ffs")};
for (const auto& database : TestDatabases(m_path_root)) {
std::unique_ptr<DatabaseBatch> batch = database->MakeBatch();
for (const auto& [k, v] : {e, p, ps, f, fs, ff, ffs}) {
batch->Write(Span{k}, Span{v});
if (dynamic_cast<BerkeleyRODatabase*>(database.get())) {
// For BerkeleyRO, open the file now. This must happen after BDB has written to the file
database->Open();
} else {
// Write elements to it if not berkeleyro
for (const auto& [k, v] : {e, p, ps, f, fs, ff, ffs}) {
batch->Write(Span{k}, Span{v});
}
}
CheckPrefix(*batch, StringBytes(""), {e, p, ps, f, fs, ff, ffs});
CheckPrefix(*batch, StringBytes("prefix"), {p, ps});
CheckPrefix(*batch, StringBytes("\xff"), {f, fs, ff, ffs});
CheckPrefix(*batch, StringBytes("\xff\xff"), {ff, ffs});
batch.reset();
database->Close();
}
}
@ -215,6 +235,10 @@ BOOST_AUTO_TEST_CASE(db_availability_after_write_error)
// To simulate the behavior, record overwrites are disallowed, and the test verifies
// that the database remains active after failing to store an existing record.
for (const auto& database : TestDatabases(m_path_root)) {
if (dynamic_cast<BerkeleyRODatabase*>(database.get())) {
// Skip this test if BerkeleyRO
continue;
}
// Write original record
std::unique_ptr<DatabaseBatch> batch = database->MakeBatch();
std::string key = "key";
@ -243,6 +267,10 @@ BOOST_AUTO_TEST_CASE(erase_prefix)
auto make_key = [](std::string type, std::string id) { return std::make_pair(type, id); };
for (const auto& database : TestDatabases(m_path_root)) {
if (dynamic_cast<BerkeleyRODatabase*>(database.get())) {
// Skip this test if BerkeleyRO
continue;
}
std::unique_ptr<DatabaseBatch> batch = database->MakeBatch();
// Write two entries with the same key type prefix, a third one with a different prefix

View File

@ -0,0 +1,139 @@
// Copyright (c) 2023 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <config/bitcoin-config.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
#include <test/util/setup_common.h>
#include <util/fs.h>
#include <util/time.h>
#include <util/translation.h>
#include <wallet/bdb.h>
#include <wallet/db.h>
#include <wallet/dump.h>
#include <wallet/migrate.h>
#include <fstream>
#include <iostream>
using wallet::DatabaseOptions;
using wallet::DatabaseStatus;
namespace {
TestingSetup* g_setup;
} // namespace
/** One-time fuzz harness setup: build the shared TestingSetup (without a log
 * file) and stash a raw pointer to it for the fuzz target to use. */
void initialize_wallet_bdb_parser()
{
    // Static so the setup is constructed once and lives for the whole fuzzing session.
    static auto testing_setup = MakeNoLogFileContext<TestingSetup>();
    g_setup = testing_setup.get();
}
/** Fuzz target: feed arbitrary bytes to the internal BerkeleyRO BDB parser,
 * and (when built with BDB) cross-check its accept/reject decision and its
 * dump output against the real Berkeley DB implementation. */
FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
{
    // Write the raw fuzz input out as a wallet file; both parsers read it back.
    // (The buffer is consumed directly, no FuzzedDataProvider is needed.)
    const auto wallet_path = g_setup->m_args.GetDataDirNet() / "fuzzed_wallet.dat";
    {
        AutoFile outfile{fsbridge::fopen(wallet_path, "wb")};
        outfile << Span{buffer};
    }

    const DatabaseOptions options{};
    DatabaseStatus status;
    bilingual_str error;

    fs::path bdb_ro_dumpfile{g_setup->m_args.GetDataDirNet() / "fuzzed_dumpfile_bdb_ro.dump"};
    if (fs::exists(bdb_ro_dumpfile)) { // Writing into an existing dump file will throw an exception
        remove(bdb_ro_dumpfile);
    }
    g_setup->m_args.ForceSetArg("-dumpfile", fs::PathToString(bdb_ro_dumpfile));

#ifdef USE_BDB
    bool bdb_ro_err = false;      // BerkeleyRO rejected the file
    bool bdb_ro_pgno_err = false; // ...specifically with a page-number error that BDB tolerates
#endif
    auto db{MakeBerkeleyRODatabase(wallet_path, options, status, error)};
    if (db) {
        assert(DumpWallet(g_setup->m_args, *db, error));
    } else {
#ifdef USE_BDB
        bdb_ro_err = true;
#endif
        if (error.original == "AutoFile::ignore: end of file: iostream error" ||
            error.original == "AutoFile::read: end of file: iostream error" ||
            error.original == "Not a BDB file" ||
            error.original == "Unsupported BDB data file version number" ||
            error.original == "Unexpected page type, should be 9 (BTree Metadata)" ||
            error.original == "Unexpected database flags, should only be 0x20 (subdatabases)" ||
            error.original == "Unexpected outer database root page type" ||
            error.original == "Unexpected number of entries in outer database root page" ||
            error.original == "Subdatabase has an unexpected name" ||
            error.original == "Subdatabase page number has unexpected length" ||
            error.original == "Unexpected inner database page type" ||
            error.original == "Unknown record type in records page" ||
            error.original == "Unknown record type in internal page" ||
            error.original == "Unexpected page size" ||
            error.original == "Unexpected page type" ||
            error.original == "Page number mismatch" ||
            error.original == "Bad btree level" ||
            error.original == "Bad page size" ||
            error.original == "File size is not a multiple of page size" ||
            error.original == "Meta page number mismatch")
        {
            // Known, expected parse failure for malformed input — nothing to check.
        } else if (error.original == "Subdatabase last page is greater than database last page" ||
                   error.original == "Page number is greater than database last page" ||
                   error.original == "Page number is greater than subdatabase last page" ||
                   error.original == "Last page number could not fit in file")
        {
#ifdef USE_BDB
            bdb_ro_pgno_err = true;
#endif
        } else {
            // Unrecognized error message: surface it so the fuzzer records a failure.
            throw std::runtime_error(error.original);
        }
    }

#ifdef USE_BDB
    // Try opening with BDB
    fs::path bdb_dumpfile{g_setup->m_args.GetDataDirNet() / "fuzzed_dumpfile_bdb.dump"};
    if (fs::exists(bdb_dumpfile)) { // Writing into an existing dump file will throw an exception
        remove(bdb_dumpfile);
    }
    g_setup->m_args.ForceSetArg("-dumpfile", fs::PathToString(bdb_dumpfile));

    try {
        auto db{MakeBerkeleyDatabase(wallet_path, options, status, error)};
        if (bdb_ro_err && !db) {
            // Both implementations rejected the file: consistent behavior.
            return;
        }
        assert(db);
        if (bdb_ro_pgno_err) {
            // BerkeleyRO will throw on opening for errors involving bad page numbers, but BDB does not.
            // Ignore those.
            return;
        }
        assert(!bdb_ro_err);
        assert(DumpWallet(g_setup->m_args, *db, error));
    } catch (const std::runtime_error&) {
        if (bdb_ro_err) return;
        // Rethrow the active exception; `throw e` would make a copy and lose the dynamic type.
        throw;
    }

    // Make sure the dumpfiles match
    if (fs::exists(bdb_ro_dumpfile) && fs::exists(bdb_dumpfile)) {
        std::ifstream bdb_ro_dump(bdb_ro_dumpfile, std::ios_base::binary | std::ios_base::in);
        std::ifstream bdb_dump(bdb_dumpfile, std::ios_base::binary | std::ios_base::in);
        // Use the four-iterator overload so that dumps of different lengths compare
        // unequal, instead of advancing an istreambuf_iterator past end-of-stream.
        assert(std::equal(
            std::istreambuf_iterator<char>(bdb_ro_dump.rdbuf()),
            std::istreambuf_iterator<char>(),
            std::istreambuf_iterator<char>(bdb_dump.rdbuf()),
            std::istreambuf_iterator<char>()));
    }
#endif
}

View File

@ -93,11 +93,6 @@ CTxDestination getNewDestination(CWallet& w, OutputType output_type)
return *Assert(w.GetNewDestination(output_type, ""));
}
// BytePrefix compares equality with other byte spans that begin with the same prefix.
struct BytePrefix { Span<const std::byte> prefix; };
bool operator<(BytePrefix a, Span<const std::byte> b) { return a.prefix < b.subspan(0, std::min(a.prefix.size(), b.size())); }
bool operator<(Span<const std::byte> a, BytePrefix b) { return a.subspan(0, std::min(a.size(), b.prefix.size())) < b.prefix; }
MockableCursor::MockableCursor(const MockableData& records, bool pass, Span<const std::byte> prefix)
{
m_pass = pass;

View File

@ -375,7 +375,12 @@ std::shared_ptr<CWallet> CreateWallet(WalletContext& context, const std::string&
uint64_t wallet_creation_flags = options.create_flags;
const SecureString& passphrase = options.create_passphrase;
ArgsManager& args = *Assert(context.args);
if (wallet_creation_flags & WALLET_FLAG_DESCRIPTORS) options.require_format = DatabaseFormat::SQLITE;
else if (args.GetBoolArg("-swapbdbendian", false)) {
options.require_format = DatabaseFormat::BERKELEY_SWAP;
}
// Indicate that the wallet is actually supposed to be blank and not just blank to make it encrypted
bool create_blank = (wallet_creation_flags & WALLET_FLAG_BLANK_WALLET);
@ -3599,6 +3604,16 @@ LegacyScriptPubKeyMan* CWallet::GetLegacyScriptPubKeyMan() const
return dynamic_cast<LegacyScriptPubKeyMan*>(it->second);
}
LegacyDataSPKM* CWallet::GetLegacyDataSPKM() const
{
if (IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) {
return nullptr;
}
auto it = m_internal_spk_managers.find(OutputType::LEGACY);
if (it == m_internal_spk_managers.end()) return nullptr;
return dynamic_cast<LegacyDataSPKM*>(it->second);
}
LegacyScriptPubKeyMan* CWallet::GetOrCreateLegacyScriptPubKeyMan()
{
SetupLegacyScriptPubKeyMan();
@ -3615,13 +3630,26 @@ void CWallet::AddScriptPubKeyMan(const uint256& id, std::unique_ptr<ScriptPubKey
MaybeUpdateBirthTime(spkm->GetTimeFirstKey());
}
LegacyDataSPKM* CWallet::GetOrCreateLegacyDataSPKM()
{
SetupLegacyScriptPubKeyMan();
return GetLegacyDataSPKM();
}
void CWallet::SetupLegacyScriptPubKeyMan()
{
if (!m_internal_spk_managers.empty() || !m_external_spk_managers.empty() || !m_spk_managers.empty() || IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) {
return;
}
auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this, m_keypool_size));
std::unique_ptr<ScriptPubKeyMan> spk_manager;
// Only create base LegacyDataSPKM if using BERKELEY_RO
if (m_database->Format() == "bdb_ro") {
spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyDataSPKM(*this));
} else {
spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this, m_keypool_size));
}
for (const auto& type : LEGACY_OUTPUT_TYPES) {
m_internal_spk_managers[type] = spk_manager.get();
m_external_spk_managers[type] = spk_manager.get();
@ -3989,7 +4017,7 @@ std::optional<MigrationData> CWallet::GetDescriptorsForLegacy(bilingual_str& err
{
AssertLockHeld(cs_wallet);
LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan();
LegacyDataSPKM* legacy_spkm = GetLegacyDataSPKM();
if (!Assume(legacy_spkm)) {
// This shouldn't happen
error = Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing"));
@ -4008,7 +4036,7 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error)
{
AssertLockHeld(cs_wallet);
LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan();
LegacyDataSPKM* legacy_spkm = GetLegacyDataSPKM();
if (!Assume(legacy_spkm)) {
// This shouldn't happen
error = Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing"));
@ -4343,11 +4371,37 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle
// If the wallet is still loaded, unload it so that nothing else tries to use it while we're changing it
bool was_loaded = false;
if (auto wallet = GetWallet(context, wallet_name)) {
if (wallet->IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) {
return util::Error{_("Error: This wallet is already a descriptor wallet")};
}
if (!RemoveWallet(context, wallet, /*load_on_start=*/std::nullopt, warnings)) {
return util::Error{_("Unable to unload the wallet before migrating")};
}
UnloadWallet(std::move(wallet));
was_loaded = true;
} else {
// We need to load this wallet to check if there's something to migrate.
// But this cannot be reused later since we want to use BERKELEY_RO for the actual migration
WalletContext empty_context;
empty_context.args = context.args;
DatabaseOptions options;
options.require_existing = true;
DatabaseStatus status;
std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(wallet_name, options, status, error);
if (!database) {
return util::Error{Untranslated("Wallet file verification failed.") + Untranslated(" ") + error};
}
wallet = CWallet::Create(empty_context, wallet_name, std::move(database), options.create_flags, error, warnings);
if (!wallet) {
return util::Error{Untranslated("Wallet loading failed.") + Untranslated(" ") + error};
}
if (wallet->IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) {
return util::Error{_("Error: This wallet is already a descriptor wallet")};
}
// Unload
wallet.reset();
}
// Load the wallet but only in the context of this function.
@ -4356,6 +4410,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle
empty_context.args = context.args;
DatabaseOptions options;
options.require_existing = true;
options.require_format = DatabaseFormat::BERKELEY_RO;
DatabaseStatus status;
std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(wallet_name, options, status, error);
if (!database) {
@ -4435,6 +4490,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(const std::string& walle
// both before and after reloading. This ensures the set is complete even if one of the wallets
// fails to reload.
std::set<fs::path> wallet_dirs;
options.require_format = std::nullopt;
if (success) {
// Migration successful, unload all wallets locally, then reload them.
// Reload the main wallet

View File

@ -961,6 +961,8 @@ public:
//! Get the LegacyScriptPubKeyMan which is used for all types, internal, and external.
LegacyScriptPubKeyMan* GetLegacyScriptPubKeyMan() const;
LegacyScriptPubKeyMan* GetOrCreateLegacyScriptPubKeyMan();
LegacyDataSPKM* GetLegacyDataSPKM() const;
LegacyDataSPKM* GetOrCreateLegacyDataSPKM();
//! Make a LegacyScriptPubKeyMan and set it for all types, internal, and external.
void SetupLegacyScriptPubKeyMan();

View File

@ -23,6 +23,7 @@
#ifdef USE_BDB
#include <wallet/bdb.h>
#endif
#include <wallet/migrate.h>
#ifdef USE_SQLITE
#include <wallet/sqlite.h>
#endif
@ -355,7 +356,7 @@ bool LoadKey(CWallet* pwallet, DataStream& ssKey, DataStream& ssValue, std::stri
strErr = "Error reading wallet database: CPrivKey corrupt";
return false;
}
if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKey(key, vchPubKey))
if (!pwallet->GetOrCreateLegacyDataSPKM()->LoadKey(key, vchPubKey))
{
strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadKey failed";
return false;
@ -394,7 +395,7 @@ bool LoadCryptedKey(CWallet* pwallet, DataStream& ssKey, DataStream& ssValue, st
}
}
if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey, checksum_valid))
if (!pwallet->GetOrCreateLegacyDataSPKM()->LoadCryptedKey(vchPubKey, vchPrivKey, checksum_valid))
{
strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCryptedKey failed";
return false;
@ -441,7 +442,7 @@ bool LoadHDChain(CWallet* pwallet, DataStream& ssValue, std::string& strErr)
try {
CHDChain chain;
ssValue >> chain;
pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadHDChain(chain);
pwallet->GetOrCreateLegacyDataSPKM()->LoadHDChain(chain);
} catch (const std::exception& e) {
if (strErr.empty()) {
strErr = e.what();
@ -585,7 +586,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch,
key >> hash;
CScript script;
value >> script;
if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCScript(script))
if (!pwallet->GetOrCreateLegacyDataSPKM()->LoadCScript(script))
{
strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCScript failed";
return DBErrors::NONCRITICAL_ERROR;
@ -608,7 +609,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch,
key >> vchPubKey;
CKeyMetadata keyMeta;
value >> keyMeta;
pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta);
pwallet->GetOrCreateLegacyDataSPKM()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta);
// Extract some CHDChain info from this metadata if it has any
if (keyMeta.nVersion >= CKeyMetadata::VERSION_WITH_HDDATA && !keyMeta.hd_seed_id.IsNull() && keyMeta.hdKeypath.size() > 0) {
@ -675,7 +676,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch,
// Set inactive chains
if (!hd_chains.empty()) {
LegacyScriptPubKeyMan* legacy_spkm = pwallet->GetLegacyScriptPubKeyMan();
LegacyDataSPKM* legacy_spkm = pwallet->GetLegacyDataSPKM();
if (legacy_spkm) {
for (const auto& [hd_seed_id, chain] : hd_chains) {
if (hd_seed_id != legacy_spkm->GetHDChain().seed_id) {
@ -696,7 +697,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch,
uint8_t fYes;
value >> fYes;
if (fYes == '1') {
pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadWatchOnly(script);
pwallet->GetOrCreateLegacyDataSPKM()->LoadWatchOnly(script);
}
return DBErrors::LOAD_OK;
});
@ -709,7 +710,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch,
key >> script;
CKeyMetadata keyMeta;
value >> keyMeta;
pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadScriptMetadata(CScriptID(script), keyMeta);
pwallet->GetOrCreateLegacyDataSPKM()->LoadScriptMetadata(CScriptID(script), keyMeta);
return DBErrors::LOAD_OK;
});
result = std::max(result, watch_meta_res.m_result);
@ -721,7 +722,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch,
key >> nIndex;
CKeyPool keypool;
value >> keypool;
pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyPool(nIndex, keypool);
pwallet->GetOrCreateLegacyDataSPKM()->LoadKeyPool(nIndex, keypool);
return DBErrors::LOAD_OK;
});
result = std::max(result, pool_res.m_result);
@ -764,7 +765,7 @@ static DBErrors LoadLegacyWalletRecords(CWallet* pwallet, DatabaseBatch& batch,
// nTimeFirstKey is only reliable if all keys have metadata
if (pwallet->IsLegacy() && (key_res.m_records + ckey_res.m_records + watch_script_res.m_records) != (keymeta_res.m_records + watch_meta_res.m_records)) {
auto spk_man = pwallet->GetOrCreateLegacyScriptPubKeyMan();
auto spk_man = pwallet->GetLegacyScriptPubKeyMan();
if (spk_man) {
LOCK(spk_man->cs_KeyStore);
spk_man->UpdateTimeFirstKey(1);
@ -1389,6 +1390,11 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
return nullptr;
}
// If BERKELEY was the format, then change the format from BERKELEY to BERKELEY_RO
if (format && options.require_format && format == DatabaseFormat::BERKELEY && options.require_format == DatabaseFormat::BERKELEY_RO) {
format = DatabaseFormat::BERKELEY_RO;
}
// A db already exists so format is set, but options also specifies the format, so make sure they agree
if (format && options.require_format && format != options.require_format) {
error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in required format.", fs::PathToString(path)));
@ -1422,6 +1428,10 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
}
}
if (format == DatabaseFormat::BERKELEY_RO) {
return MakeBerkeleyRODatabase(path, options, status, error);
}
#ifdef USE_BDB
if constexpr (true) {
return MakeBerkeleyDatabase(path, options, status, error);

View File

@ -194,6 +194,11 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command)
ReadDatabaseArgs(args, options);
options.require_existing = true;
DatabaseStatus status;
if (args.GetBoolArg("-withinternalbdb", false) && IsBDBFile(BDBDataFile(path))) {
options.require_format = DatabaseFormat::BERKELEY_RO;
}
bilingual_str error;
std::unique_ptr<WalletDatabase> database = MakeDatabase(path, options, status, error);
if (!database) {

View File

@ -419,8 +419,9 @@ class TestNode():
return True
def wait_until_stopped(self, *, timeout=BITCOIND_PROC_WAIT_TIMEOUT, expect_error=False, **kwargs):
expected_ret_code = 1 if expect_error else 0 # Whether node shutdown return EXIT_FAILURE or EXIT_SUCCESS
self.wait_until(lambda: self.is_node_stopped(expected_ret_code=expected_ret_code, **kwargs), timeout=timeout)
if "expected_ret_code" not in kwargs:
kwargs["expected_ret_code"] = 1 if expect_error else 0 # Whether node shutdown return EXIT_FAILURE or EXIT_SUCCESS
self.wait_until(lambda: self.is_node_stopped(**kwargs), timeout=timeout)
def replace_in_config(self, replacements):
"""

View File

@ -176,6 +176,8 @@ BASE_SCRIPTS = [
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'tool_wallet.py --legacy-wallet',
'tool_wallet.py --legacy-wallet --bdbro',
'tool_wallet.py --legacy-wallet --bdbro --swap-bdb-endian',
'tool_wallet.py --descriptors',
'tool_signet_miner.py --legacy-wallet',
'tool_signet_miner.py --descriptors',

View File

@ -5,6 +5,7 @@
"""Test bitcoin-wallet."""
import os
import platform
import stat
import subprocess
import textwrap
@ -14,6 +15,7 @@ from collections import OrderedDict
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
sha256sum_file,
)
@ -21,11 +23,15 @@ from test_framework.util import (
class ToolWalletTest(BitcoinTestFramework):
def add_options(self, parser):
    """Register tool_wallet-specific command line options.

    --bdbro: dump Berkeley DB wallet files with the internal BerkeleyRO parser.
    --swap-bdb-endian: create legacy BDB wallets byte-swapped internally.
    """
    self.add_wallet_options(parser)
    parser.add_argument("--bdbro", action="store_true", help="Use the BerkeleyRO internal parser when dumping a Berkeley DB wallet file")
    # Fixed typo in help text: "make then byte swapped" -> "make them byte swapped"
    parser.add_argument("--swap-bdb-endian", action="store_true", help="When making Legacy BDB wallets, always make them byte swapped internally")
def set_test_params(self):
    # Single node on a fresh chain; wallet-tool runs can be slow, so use a
    # generous RPC timeout.
    self.num_nodes = 1
    self.setup_clean_chain = True
    self.rpc_timeout = 120
    # When --swap-bdb-endian was passed, start the node with -swapbdbendian so
    # newly created legacy BDB wallets are written byte-swapped.
    if self.options.swap_bdb_endian:
        self.extra_args = [["-swapbdbendian"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@ -35,15 +41,21 @@ class ToolWalletTest(BitcoinTestFramework):
default_args = ['-datadir={}'.format(self.nodes[0].datadir_path), '-chain=%s' % self.chain]
if not self.options.descriptors and 'create' in args:
default_args.append('-legacy')
if "dump" in args and self.options.bdbro:
default_args.append("-withinternalbdb")
return subprocess.Popen([self.options.bitcoinwallet] + default_args + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
def assert_raises_tool_error(self, error, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
if isinstance(error, tuple):
assert_equal(p.poll(), error[0])
assert error[1] in stderr.strip()
else:
assert_equal(p.poll(), 1)
assert error in stderr.strip()
def assert_tool_output(self, output, *args):
p = self.bitcoin_wallet_process(*args)
@ -451,6 +463,88 @@ class ToolWalletTest(BitcoinTestFramework):
''')
self.assert_tool_output(expected_output, "-wallet=conflicts", "info")
def test_dump_endianness(self):
    """Round-trip one wallet through createfromdump in both BDB byte orders and
    check that each round-tripped dump matches the original dump."""
    self.log.info("Testing dumps of the same contents with different BDB endianness")
    self.start_node(0)
    self.nodes[0].createwallet("endian")
    self.stop_node(0)

    dump_path = self.nodes[0].datadir_path / "endian.dump"
    self.assert_tool_output("The dumpfile may contain private keys. To ensure the safety of your Bitcoin, do not share the dumpfile.\n", "-wallet=endian", f"-dumpfile={dump_path}", "dump")
    baseline = self.read_dump(dump_path)

    # Recreate the wallet in native and swapped byte order; both must dump identically.
    for wallet_name, db_format in (("native_endian", "bdb"), ("other_endian", "bdb_swap")):
        self.do_tool_createfromdump(wallet_name, "endian.dump", db_format)
        roundtripped = self.read_dump(self.nodes[0].datadir_path / f"rt-{wallet_name}.dump")
        self.assert_dump(baseline, roundtripped)
def test_dump_very_large_records(self):
    """Build a wallet containing a record big enough to span overflow pages and
    verify the wallet tool can still dump it."""
    self.log.info("Test that wallets with large records are successfully dumped")
    self.start_node(0)
    self.nodes[0].createwallet("bigrecords")
    big_wallet = self.nodes[0].get_wallet_rpc("bigrecords")

    # Both BDB and sqlite have maximum page sizes of 65536 bytes, with defaults of 4096.
    # Once a record crosses a size threshold, its data is pushed into one or more
    # overflow pages; the tooling must be able to dump records that span several
    # pages. A sufficiently large transaction produces such a record.
    self.generate(self.nodes[0], 101)
    funding_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
    outputs = {big_wallet.getnewaddress(address_type="p2sh-segwit"): 0.01 for _ in range(500)}
    funding_wallet.sendmany(amounts=outputs)
    self.generate(self.nodes[0], 1)

    # Sweep everything back in a single huge transaction.
    send_res = big_wallet.sendall([funding_wallet.getnewaddress()])
    self.generate(self.nodes[0], 1)
    assert_equal(send_res["complete"], True)
    tx = big_wallet.gettransaction(txid=send_res["txid"], verbose=True)
    assert_greater_than(tx["decoded"]["size"], 70000)

    self.stop_node(0)
    dump_path = self.nodes[0].datadir_path / "bigrecords.dump"
    self.assert_tool_output("The dumpfile may contain private keys. To ensure the safety of your Bitcoin, do not share the dumpfile.\n", "-wallet=bigrecords", f"-dumpfile={dump_path}", "dump")
    dump = self.read_dump(dump_path)
    assert any(tx["hex"] in value for value in dump.values()), "Big transaction was not found in wallet dump"
def test_dump_unclean_lsns(self):
    """Check that the BerkeleyRO dumper refuses a BDB file whose LSNs were never
    reset (i.e. the database was not flushed cleanly), and that it succeeds once
    the wallet has been reopened and closed normally."""
    if not self.options.bdbro:
        return
    self.log.info("Test that a legacy wallet that has not been compacted is not dumped by bdbro")
    self.start_node(0, extra_args=["-flushwallet=0"])
    self.nodes[0].createwallet("unclean_lsn")
    rpc = self.nodes[0].get_wallet_rpc("unclean_lsn")
    # An unload/load cycle guarantees everything written so far is flushed.
    rpc.unloadwallet()
    self.nodes[0].loadwallet("unclean_lsn")
    # Trigger a burst of fresh writes by topping up the keypool.
    rpc.keypoolrefill(rpc.getwalletinfo()["keypoolsize"] + 100)
    # Kill the process so the pending LSNs never get reset.
    self.nodes[0].process.kill()
    self.nodes[0].wait_until_stopped(expected_ret_code=1 if platform.system() == "Windows" else -9)
    assert self.nodes[0].is_node_stopped()

    dump_path = self.nodes[0].datadir_path / "unclean_lsn.dump"
    self.assert_raises_tool_error("LSNs are not reset, this database is not completely flushed. Please reopen then close the database with a version that has BDB support", "-wallet=unclean_lsn", f"-dumpfile={dump_path}", "dump")

    # After a normal load/close the file is flushed and the dump succeeds.
    self.start_node(0)
    self.nodes[0].loadwallet("unclean_lsn")
    self.stop_node(0)
    self.assert_tool_output("The dumpfile may contain private keys. To ensure the safety of your Bitcoin, do not share the dumpfile.\n", "-wallet=unclean_lsn", f"-dumpfile={dump_path}", "dump")
def run_test(self):
self.wallet_path = self.nodes[0].wallets_path / self.default_wallet_name / self.wallet_data_filename
self.test_invalid_tool_commands_and_args()
@ -462,8 +556,11 @@ class ToolWalletTest(BitcoinTestFramework):
if not self.options.descriptors:
# Salvage is a legacy wallet only thing
self.test_salvage()
self.test_dump_endianness()
self.test_dump_unclean_lsns()
self.test_dump_createfromdump()
self.test_chainless_conflicts()
self.test_dump_very_large_records()
if __name__ == '__main__':
ToolWalletTest().main()