Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Checkpoints #99

Merged
merged 13 commits into from
Dec 12, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ libhsk_la_SOURCES = src/addr.c \
src/sha3.c \
src/sig0.c \
src/siphash.c \
src/store.c \
src/timedata.c \
src/utils.c \
src/secp256k1/secp256k1.c
Expand Down
19 changes: 19 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -304,6 +304,16 @@ and some of them are required to be installed manually.
$ hnsd [options]
```

**Recommended usage:**

```sh
mkdir ~/.hnsd
hnsd -t -x ~/.hnsd
```

This will start hnsd syncing from the hard-coded checkpoint and continue to save
its own checkpoints to disk to ensure rapid chain sync on future boots.

### Options

```
Expand Down Expand Up @@ -336,6 +346,15 @@ $ hnsd [options]
-l, --log-file <filename>
Redirect output to a log file.

-a, --user-agent <string>
Add supplemental user agent string in p2p version message.

-t, --checkpoint
Start chain sync from checkpoint.

-x, --prefix <directory name>
Write/read state to/from disk in given directory.

-d, --daemon
Fork and background the process.

Expand Down
72 changes: 68 additions & 4 deletions integration/test-util.js
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
const {spawn, execSync} = require('child_process');
const assert = require('bsert');
const path = require('path');
const {FullNode} = require('hsd');
const {FullNode, packets} = require('hsd');
const wire = require('bns/lib/wire');
const StubResolver = require('bns/lib/resolver/stub');
const {EOL} = require('os');
Expand All @@ -28,13 +28,51 @@ class TestUtil {
plugins: [require('hsd/lib/wallet/plugin')]
});

// Packets received by full node from hnsd
this.packetsFrom = {};
this.node.pool.on('packet', (packet) => {
const type = packets.typesByVal[packet.type];
if (!this.packetsFrom[type])
this.packetsFrom[type] = [packet];
else
this.packetsFrom[type].push(packet);
});

// Packets sent to hnsd by the full node
this.packetsTo = {};
this.node.pool.on('peer open', (peer) => {
peer.SEND = peer.send;

peer.send = (packet) => {
const type = packets.typesByVal[packet.type];
if (!this.packetsTo[type])
this.packetsTo[type] = [packet];
else
this.packetsTo[type].push(packet);

peer.SEND(packet);
};
});

this.wallet = null;

this.resolver = new StubResolver();
this.resolver.setServers(['127.0.0.1:25349']);

this.hnsd = null;
this.hnsdHeight = 0;
this.hnsdArgsBase = ['-s', '127.0.0.1:10000'];
this.hnsdArgs = this.hnsdArgsBase;
}

extraArgs(args) {
assert(Array.isArray(args));
this.hnsdArgs = this.hnsdArgs.concat(args);
}

resetPackets() {
this.packetsTo = {};
this.packetsFrom = {};
}

async open() {
Expand All @@ -54,22 +92,48 @@ class TestUtil {
return this.openHNSD();
}

// Shut down the test harness: kill the hnsd child process first,
// then close the hsd full node. Order matters — hnsd is a peer of
// the full node and must go away before the node is torn down.
async close() {
  this.closeHNSD();
  await this.node.close();
}

async openHNSD() {
return new Promise((resolve, reject) => {
this.hnsd = spawn(
path.join(__dirname, '..', 'hnsd'),
['-s', `${this.host}:${this.port}`],
this.hnsdArgs,
{stdio: 'ignore'}
);

this.hnsd.on('spawn', () => resolve());
this.hnsd.on('error', () => reject());
this.hnsd.on('close', this.crash);
});
}

async close() {
crash(code, signal) {
throw new Error(`hnsd crashed with code: ${code} and signal: ${signal}`);
}

closeHNSD() {
if (!this.hnsd)
return;

this.hnsd.removeListener('close', this.crash);

this.hnsd.kill('SIGKILL');
await this.node.close();
this.hnsd = null;
}

async restartHNSD(args) {
this.closeHNSD();

if (args) {
assert(Array.isArray(args));
this.hnsdArgs = this.hnsdArgsBase.concat(args);
}

return this.openHNSD();
}

async getWalletAddress() {
Expand Down
245 changes: 245 additions & 0 deletions integration/test/checkpoints-test.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,245 @@
/* eslint-env mocha */
/* eslint prefer-arrow-callback: "off" */
/* eslint max-len: "off" */
/* eslint no-return-assign: "off" */

'use strict';

const os = require('os');
const fs = require('fs');
const path = require('path');
const assert = require('bsert');
const TestUtil = require('../test-util');
const util = new TestUtil();

describe('Checkpoints', function() {
  this.timeout(20000);

  // Unique temp directory holding hnsd's on-disk checkpoint state.
  // (was: `let` + immediate reassignment; a single const is clearer)
  const tmpdir = path.join(os.tmpdir(), `hnsd-test-${Date.now()}`);

  before(async () => {
    // mkdirSync is synchronous — awaiting it was a no-op.
    fs.mkdirSync(tmpdir);
    util.extraArgs(['-x', tmpdir]); // enable automatic checkpoints on disk
    await util.open();
  });

  after(async () => {
    await util.close();
  });

  beforeEach(() => {
    util.resetPackets();
  });

  // Resolve an array of block hashes to their main-chain heights
  // using the full node's chain (sequential to keep db access simple).
  async function hashesToHeights(hashes) {
    assert(Array.isArray(hashes));

    const heights = [];
    for (const hash of hashes)
      heights.push(await util.node.chain.getMainHeight(hash));

    return heights;
  }

  it('should initial sync', async() => {
    await util.generate(1000);
    await util.waitForSync();

    // With no checkpoint on disk yet, hnsd's first locator is
    // just the genesis block.
    assert(util.packetsFrom.GETHEADERS.length);
    const {locator} = util.packetsFrom.GETHEADERS.shift();

    // Just the genesis block
    assert.strictEqual(locator.length, 1);
    assert.bufferEqual(locator[0], util.node.network.genesis.hash);
  });

  it('should restart from checkpoint with no peers', async () => {
    // Disconnect full node
    await util.node.pool.close();

    await util.restartHNSD(['-x', tmpdir]);

    // Sanity check: these blocks should not be received by hnsd
    await util.generate(100);

    // Last height hnsd synced to was 1000.
    // It should have saved a checkpoint containing the first 150
    // blocks of the checkpoint window which on regtest is 200 blocks.
    // Upon restart, it should automatically initialize through that checkpoint.
    const {hnsd} = await util.getHeights();
    assert.strictEqual(hnsd, 949);
  });

  it('should continue syncing after starting from checkpoint', async () => {
    // Reconnect full node
    await util.node.pool.open();
    await util.node.pool.connect();

    await util.waitForSync();
    const {hnsd, hsd} = await util.getHeights();
    assert.strictEqual(hsd, hnsd);
    assert.strictEqual(hnsd, 1100);

    // Full node sent 151 headers, starting at height 950
    assert(util.packetsTo.HEADERS.length);
    const {items} = util.packetsTo.HEADERS.shift();
    assert.strictEqual(items.length, 151);
    const hashes = items.map(h => h.hash());
    const heights = await hashesToHeights(hashes);
    assert.strictEqual(heights[0], 950);
  });

  it('should restart from checkpoint and resync', async () => {
    await util.restartHNSD(['-x', tmpdir]);
    await util.waitForSync();

    // After restarting from the checkpoint, the locator walks back
    // from the checkpoint tip (949) with exponentially growing gaps.
    assert(util.packetsFrom.GETHEADERS.length);
    const {locator} = util.packetsFrom.GETHEADERS.shift();
    const locatorHeights = await hashesToHeights(locator);

    assert.deepStrictEqual(
      locatorHeights,
      [
        949, // tip
        // 10 prev blocks
        948, 947, 946, 945, 944, 943, 942, 941, 940, 939,
        938, // -1
        936, // -2
        932, // -4
        924, // -8
        908, // -16
        876, // -32
        812, // -64
        0 // hnsd doesn't have any blocks lower than 800, so skip to genesis
      ]
    );

    const {hnsd, hsd} = await util.getHeights();
    assert.strictEqual(hsd, hnsd);
    assert.strictEqual(hnsd, 1100);

    // Full node sent 151 headers, starting at height 950
    assert(util.packetsTo.HEADERS.length);
    const {items} = util.packetsTo.HEADERS.shift();
    assert.strictEqual(items.length, 151);
    const hashes = items.map(h => h.hash());
    const headersHeights = await hashesToHeights(hashes);
    assert.strictEqual(headersHeights[0], 950);
  });

  it('should resync from checkpoint after a reorg (after)', async () => {
    // Disconnect full node
    await util.node.pool.close();

    // Reorg chain
    // Fork point comes AFTER checkpoint
    const hash = await util.node.chain.getHash(1001);
    await util.node.chain.invalidate(hash);
    {
      const {hsd, hnsd} = await util.getHeights();
      assert.strictEqual(hsd, 1000);
      assert.strictEqual(hnsd, 1100);
    }
    await util.generate(110);

    // Reconnect full node
    util.resetPackets();
    await util.restartHNSD(['-x', tmpdir]);
    await util.node.pool.open();
    await util.node.pool.connect();

    await util.waitForSync();
    const {hnsd, hsd} = await util.getHeights();
    assert.strictEqual(hsd, hnsd);
    assert.strictEqual(hnsd, 1110);

    // Full node sent 161 headers, starting at height 950
    // (reorg fork was at height 1000, last locator hash was at 949)
    assert(util.packetsTo.HEADERS.length);
    const {items} = util.packetsTo.HEADERS.shift();
    assert.strictEqual(items.length, 161);
    const hashes = items.map(h => h.hash());
    const headersHeights = await hashesToHeights(hashes);
    assert.strictEqual(headersHeights[0], 950);
  });

  it('should resync from checkpoint after a reorg (inside)', async () => {
    // Disconnect full node
    await util.node.pool.close();

    // Reorg chain
    // Fork point comes INSIDE checkpoint
    const hash = await util.node.chain.getHash(931);
    await util.node.chain.invalidate(hash);
    {
      const {hsd, hnsd} = await util.getHeights();
      assert.strictEqual(hsd, 930);
      assert.strictEqual(hnsd, 1110);
    }
    await util.generate(190);

    // Reconnect full node
    util.resetPackets();
    await util.restartHNSD(['-x', tmpdir]);
    await util.node.pool.open();
    await util.node.pool.connect();

    await util.waitForSync();
    const {hnsd, hsd} = await util.getHeights();
    assert.strictEqual(hsd, hnsd);
    assert.strictEqual(hnsd, 1120);

    // Full node sent 196 headers, starting at height 925
    // (reorg fork was at height 930, previous locator hash was at 924)
    assert(util.packetsTo.HEADERS.length);
    const {items} = util.packetsTo.HEADERS.shift();
    assert.strictEqual(items.length, 196);
    const hashes = items.map(h => h.hash());
    const headersHeights = await hashesToHeights(hashes);
    assert.strictEqual(headersHeights[0], 925);
  });

  it('should resync from checkpoint after a reorg (before)', async () => {
    // Disconnect full node
    await util.node.pool.close();

    // Reorg chain
    // Fork point comes BEFORE checkpoint
    const hash = await util.node.chain.getHash(801);
    await util.node.chain.invalidate(hash);
    {
      const {hsd, hnsd} = await util.getHeights();
      assert.strictEqual(hsd, 800);
      assert.strictEqual(hnsd, 1120);
    }
    await util.generate(330);

    // Reconnect full node
    await util.restartHNSD(['-x', tmpdir]);
    await util.node.pool.open();
    await util.node.pool.connect();

    await util.waitForSync();
    const {hnsd, hsd} = await util.getHeights();
    assert.strictEqual(hsd, hnsd);
    assert.strictEqual(hnsd, 1130);

    // Full node sent 1130 headers, starting at height 1
    // (reorg fork was at height 800, previous locator hash was at 0)
    assert(util.packetsTo.HEADERS.length);
    const {items} = util.packetsTo.HEADERS.shift();
    assert.strictEqual(items.length, 1130);
    const hashes = items.map(h => h.hash());
    const headersHeights = await hashesToHeights(hashes);
    assert.strictEqual(headersHeights[0], 1);
  });

  it('should survive all that', async () => {
    // End-to-end sanity check: hnsd's DNS interface reports the
    // same chain tip hash as the full node.
    const hash = await util.resolveHS('hash.tip.chain.hnsd.');
    assert.strictEqual(
      hash,
      util.node.chain.tip.hash.toString('hex')
    );
  });
});
Loading