Skip to content

Commit

Permalink
fix: if sqlite file is 25% larger than tmp backup then run VACUUM, delete old threads not used anymore from expunged messages, concurrency check should be > 10 not >= 10
Browse files Browse the repository at this point in the history
  • Loading branch information
titanism committed Aug 29, 2024
1 parent 7cccd5a commit b517e22
Show file tree
Hide file tree
Showing 4 changed files with 113 additions and 11 deletions.
59 changes: 49 additions & 10 deletions helpers/get-database.js
Original file line number Diff line number Diff line change
Expand Up @@ -598,17 +598,20 @@ async function getDatabase(
let migrateCheck = !instance.server;
let folderCheck = !instance.server;
let trashCheck = !instance.server;
let threadCheck = !instance.server;

if (instance.client && instance.server) {
try {
const results = await instance.client.mget([
`migrate_check:${session.user.alias_id}`,
`folder_check:${session.user.alias_id}`,
`trash_check:${session.user.alias_id}`
`trash_check:${session.user.alias_id}`,
`thread_check:${session.user.alias_id}`
]);
migrateCheck = boolean(results[0]);
folderCheck = boolean(results[1]);
trashCheck = boolean(results[2]);
threadCheck = boolean(results[3]);
} catch (err) {
logger.fatal(err);
}
Expand Down Expand Up @@ -674,8 +677,8 @@ async function getDatabase(
// create initial folders for the user if they do not yet exist
// (only do this once every day)
//
try {
if (!folderCheck) {
if (!folderCheck) {
try {
const paths = await Mailboxes.distinct(instance, session, 'path', {});
const required = [];
for (const path of REQUIRED_PATHS) {
Expand Down Expand Up @@ -739,16 +742,16 @@ async function getDatabase(
'PX',
ms('1d')
);
} catch (err) {
logger.fatal(err, { session });
}
} catch (err) {
logger.fatal(err, { session });
}

//
// NOTE: we remove messages in Junk/Trash folder that are >= 30 days old
// (but we only do this once every day)
try {
if (!trashCheck) {
if (!trashCheck) {
try {
const mailboxes = await Mailboxes.find(instance, session, {
path: {
$in: ['Trash', 'Spam', 'Junk']
Expand Down Expand Up @@ -811,12 +814,48 @@ async function getDatabase(
'PX',
ms('1d')
);
} catch (err) {
logger.fatal(err, { session });
}
}

//
// NOTE: we delete thread ids that don't correspond to messages anymore
//
if (!threadCheck) {
try {
const sql = builder.build({
type: 'select',
table: 'Messages',
distinct: true,
fields: ['thread']
});
const threadIds = db.prepare(sql.query).pluck().all(sql.values);
if (threadIds.length > 0) {
const removeSql = builder.build({
type: 'remove',
table: 'Threads',
condition: {
_id: {
$nin: threadIds
}
}
});
db.prepare(removeSql.query).run(removeSql.values);
}

await instance.client.set(
`thread_check:${session.user.alias_id}`,
true,
'PX',
ms('1d')
);
} catch (err) {
logger.fatal(err, { session });
}
} catch (err) {
logger.fatal(err, { session });
}

if (!migrateCheck || !folderCheck || !trashCheck) {
if (!migrateCheck || !folderCheck || !trashCheck || !threadCheck) {
try {
//
// All applications should run "PRAGMA optimize;" after a schema change,
Expand Down
2 changes: 2 additions & 0 deletions helpers/mongoose-to-sqlite.js
Original file line number Diff line number Diff line change
Expand Up @@ -825,6 +825,8 @@ async function distinct(instance, session, field, conditions = {}) {
if (!instance.wsp && instance?.constructor?.name !== 'SQLite')
throw new TypeError('WebSocketAsPromised instance required');

// TODO: use DISTINCT via `distinct: true` if only one field
// <https://github.com/2do2go/json-sql/blob/4be018c0662dacba06ddf033d18e71ebf93ee7c3/tests/1_select.js#L28-L38>
const sql = builder.build({
type: 'select',
table,
Expand Down
2 changes: 1 addition & 1 deletion helpers/on-connect.js
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ async function onConnect(session, fn) {
const key = `${prefix}:${session.remoteAddress}`;
const count = await this.client.incr(key);
await this.client.pexpire(key, config.socketTimeout);
if (count >= 10)
if (count > 10)
throw new SMTPError(
`Too many concurrent connections from ${session.remoteAddress}`,
{ responseCode: 421, ignoreHook: true }
Expand Down
61 changes: 61 additions & 0 deletions helpers/worker.js
Original file line number Diff line number Diff line change
Expand Up @@ -783,6 +783,67 @@ async function backup(payload) {
await logger.fatal(err, { payload });
}

//
// NOTE: if the SQLite file is >= 25% larger than the tmp backup, then we
// should run a VACUUM since auto vacuum isn't optimal
//
if (payload.format === 'sqlite' && tmp) {
try {
// check how much space is remaining on storage location
const storagePath = getPathToDatabase({
id: payload.session.user.alias_id,
storage_location: payload.session.user.storage_location
});
const diskSpace = await checkDiskSpace(storagePath);

// <https://github.com/nodejs/node/issues/38006>
const stats = await fs.promises.stat(storagePath);
if (!stats.isFile() || stats.size === 0) {
const err = new TypeError('Database empty');
err.stats = stats;
throw err;
}

// we calculate size of db x 2 (backup + tarball)
const spaceRequired = stats.size * 2;

if (diskSpace.free < spaceRequired)
throw new TypeError(
`Needed ${prettyBytes(spaceRequired)} but only ${prettyBytes(
diskSpace.free
)} was available`
);

//
// check if main sqlite file is >= 25% larger than tmp file
//
// <https://github.com/nodejs/node/issues/38006>
const tmpStats = await fs.promises.stat(tmp);
if (!tmpStats.isFile() || tmpStats.size === 0) {
const err = new TypeError('Database empty');
err.stats = stats;
throw err;
}

if (stats.size >= Math.round(tmpStats.size * 1.25)) {
const db = await getDatabase(
instance,
// alias
{
id: payload.session.user.alias_id,
storage_location: payload.session.user.storage_location
},
payload.session
);
db.prepare('VACUUM').run();
await closeDatabase(db);
}
} catch (_err) {
_err.isCodeBug = true;
await logger.fatal(_err, { payload });
}
}

// always do cleanup in case of errors
if (tmp && backup) {
try {
Expand Down

0 comments on commit b517e22

Please sign in to comment.