[cwksp] Align all allocated "tables" and "aligneds" to 64 bytes (#2546)
* Perform 64-byte alignment of wksp tables and aligneds internally

* Clean up cwksp_finalize() function to only do two allocs

* Refactor aligned/buffer reservation code, remove ASAN req for alignment reservations

* Change from allocating 128 bytes always to allocating only buffer space as needed for tables/aligned

* Back out aligned/table reservation order restriction

* Add stricter bounds for new/resized wksps, fix comment in zstd_cwksp.h
senhuang42 authored Apr 2, 2021
1 parent 255925c commit 980f3bb
Showing 2 changed files with 167 additions and 64 deletions.
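
As orientation before the diff: the change rounds every table and "aligned" allocation up to a multiple of 64 bytes and places those regions on 64-byte boundaries inside the workspace. A minimal standalone sketch of that arithmetic (illustrative only, not library code; the helper names are hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGNMENT_BYTES 64  /* mirrors ZSTD_CWKSP_ALIGNMENT_BYTES introduced below */

/* Round a requested size up to the next multiple of 64 bytes. */
static size_t round_up_to_alignment(size_t size) {
    size_t const mask = ALIGNMENT_BYTES - 1;
    return (size + mask) & ~mask;
}

/* Bytes needed to advance addr forward to the next 64-byte boundary. */
static size_t bytes_to_align_addr(uintptr_t addr) {
    size_t const mask = ALIGNMENT_BYTES - 1;
    return (ALIGNMENT_BYTES - (addr & mask)) & mask;
}

int main(void) {
    assert(round_up_to_alignment(100) == 128);   /* 100 -> next multiple of 64 */
    assert(round_up_to_alignment(64)  == 64);    /* already a multiple, unchanged */
    assert(bytes_to_align_addr(0x1008) == 0x38); /* 0x1008 -> 0x1040 needs 56 bytes */
    return 0;
}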
40 changes: 21 additions & 19 deletions lib/compress/zstd_compress.c
@@ -1339,18 +1339,23 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ hSize * sizeof(U32)
+ h3Size * sizeof(U32);
size_t const optPotentialSpace =
ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
: 0;
size_t const slackSpace = ZSTD_cwksp_slack_space_required();

/* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);

DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
(U32)chainSize, (U32)hSize, (U32)h3Size);
return tableSpace + optSpace;
return tableSpace + optSpace + slackSpace;
}

static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
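
The new ZSTD_STATIC_ASSERT above is what lets the estimate treat table sizes as already 64-byte-sized: every table holds a power-of-two count of U32 entries with a log of at least 4, so its byte size is at least 16 * 4 = 64 and always a multiple of 64. A quick standalone check of that claim (illustrative only; assumes a 4-byte unsigned int):

#include <assert.h>
#include <stddef.h>

typedef unsigned int U32;  /* assumed 4 bytes, as on the platforms zstd targets */

int main(void) {
    unsigned tableLog;
    for (tableLog = 4; tableLog <= 27; tableLog++) {
        size_t const nbEntries = (size_t)1 << tableLog;
        assert((nbEntries * sizeof(U32)) % 64 == 0);  /* always a multiple of 64 bytes */
    }
    return 0;
}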
@@ -1366,7 +1371,7 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
U32 const divider = (cParams->minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
+ ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
@@ -1375,7 +1380,7 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm ?
ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;


size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
@@ -1703,19 +1708,20 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&params.cParams, &params.ldmParams, zc->staticSize != 0,
buffInSize, buffOutSize, pledgedSrcSize);
int resizeWorkspace;

FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");

if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);

/* Check if workspace is large enough, alloc a new one if needed */
{
{ /* Check if workspace is large enough, alloc a new one if needed */
int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);

resizeWorkspace = workspaceTooSmall || workspaceWasteful;
DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);

if (workspaceTooSmall || workspaceWasteful) {
if (resizeWorkspace) {
DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
ZSTD_cwksp_sizeof(ws) >> 10,
neededSpace >> 10);
@@ -1814,13 +1820,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->ldmState.loadedDictEnd = 0;
}

/* Due to alignment, when reusing a workspace, we can actually consume
* up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
*/
assert(ZSTD_cwksp_used(ws) >= neededSpace &&
ZSTD_cwksp_used(ws) <= neededSpace + 3);

assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));

zc->initialized = 1;

return 0;
191 changes: 146 additions & 45 deletions lib/compress/zstd_cwksp.h
@@ -35,6 +35,10 @@ extern "C" {
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and aligneds to align by 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
* Structures
***************************************/
@@ -117,10 +121,11 @@ typedef enum {
* - Tables: these are any of several different datastructures (hash tables,
* chain tables, binary trees) that all respect a common format: they are
* uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
* Their sizes depend on the cparams.
* Their sizes depend on the cparams. These tables are 64-byte aligned.
*
* - Aligned: these buffers are used for various purposes that require 4 byte
* alignment, but don't require any initialization before they're used.
* alignment, but don't require any initialization before they're used. These
* buffers are each aligned to 64 bytes.
*
* - Buffers: these buffers are used for various purposes that don't require
* any alignment or initialization before they're used. This means they can
@@ -133,8 +138,7 @@ typedef enum {
*
* 1. Objects
* 2. Buffers
* 3. Aligned
* 4. Tables
* 3. Aligned/Tables
*
* Attempts to reserve objects of different types out of order will fail.
*/
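
The phase list above is the contract callers follow: reserve all objects first, then buffers, then aligneds and tables (the last two now share a phase). A hedged usage sketch against the cwksp API (not taken from the library; it assumes the internal header and the ZSTD_cwksp_init / ZSTD_cwksp_static_alloc names as they exist at this commit):

#include "zstd_cwksp.h"  /* internal header; only available inside a zstd build */

static void cwksp_phase_order_sketch(void) {
    static size_t arena[(1 << 16) / sizeof(size_t)];  /* caller-provided, pointer-aligned memory */
    ZSTD_cwksp ws;
    void* obj; BYTE* buf; U32* table; void* algn;

    ZSTD_cwksp_init(&ws, arena, sizeof(arena), ZSTD_cwksp_static_alloc);
    obj   = ZSTD_cwksp_reserve_object(&ws, 64);                    /* 1. objects */
    buf   = ZSTD_cwksp_reserve_buffer(&ws, 1024);                  /* 2. buffers */
    table = (U32*)ZSTD_cwksp_reserve_table(&ws, 64 * sizeof(U32)); /* 3. tables: size is a 64-byte multiple */
    algn  = ZSTD_cwksp_reserve_aligned(&ws, 3 * 64);               /* 3. aligneds share the final phase */
    (void)obj; (void)buf; (void)table; (void)algn;
    /* Reserving another buffer after this point would violate the ordering;
     * the wksp never moves back to an earlier phase (asserted in debug builds). */
}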
Expand Down Expand Up @@ -187,6 +191,8 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
* Since tables aren't currently redzoned, you don't need to call through this
* to figure out how much space you need for the matchState tables. Everything
* else is though.
*
* Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
*/
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
if (size == 0)
@@ -198,30 +204,110 @@ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
#endif
}

MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
/**
* Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
* Used to determine the number of bytes required for a given "aligned".
*/
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}

/**
* Returns the amount of additional space the cwksp must allocate
* for internal purposes (currently only alignment).
*/
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
/* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
* to align the beginning of tables section, as well as another n_2=[0, 63] bytes
 * to align the beginning of the aligned section.
*
* n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
* aligneds being sized in multiples of 64 bytes.
*/
size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
return slackSpace;
}


/**
* Return the number of additional bytes required to align a pointer to the given number of bytes.
* alignBytes must be a power of two.
*/
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
size_t const alignBytesMask = alignBytes - 1;
size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
assert((alignBytes & alignBytesMask) == 0);
assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
return bytes;
}

/**
* Internal function. Do not use directly.
* Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
* counts from the end of the wksp. (as opposed to the object/table segment)
*
* Returns a pointer to the beginning of that space.
*/
MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
void* const alloc = (BYTE*)ws->allocStart - bytes;
void* const bottom = ws->tableEnd;
DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
if (alloc < bottom) {
DEBUGLOG(4, "cwksp: alloc failed!");
ws->allocFailed = 1;
return NULL;
}
if (alloc < ws->tableValidEnd) {
ws->tableValidEnd = alloc;
}
ws->allocStart = alloc;
return alloc;
}

/**
* Moves the cwksp to the next phase, and does any necessary allocations.
 * Returns 0 on success, or a zstd error code.
*/
MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
assert(phase >= ws->phase);
if (phase > ws->phase) {
/* Going from allocating objects to allocating buffers */
if (ws->phase < ZSTD_cwksp_alloc_buffers &&
phase >= ZSTD_cwksp_alloc_buffers) {
ws->tableValidEnd = ws->objectEnd;
}

/* Going from allocating buffers to allocating aligneds/tables */
if (ws->phase < ZSTD_cwksp_alloc_aligned &&
phase >= ZSTD_cwksp_alloc_aligned) {
/* If unaligned allocations down from a too-large top have left us
* unaligned, we need to realign our alloc ptr. Technically, this
* can consume space that is unaccounted for in the neededSpace
* calculation. However, I believe this can only happen when the
* workspace is too large, and specifically when it is too large
* by a larger margin than the space that will be consumed. */
/* TODO: cleaner, compiler warning friendly way to do this??? */
ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
if (ws->allocStart < ws->tableValidEnd) {
ws->tableValidEnd = ws->allocStart;
{ /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
size_t const bytesToAlign =
ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
memory_allocation, "aligned phase - alignment initial allocation failed!");
}
{ /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
void* const alloc = ws->objectEnd;
size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
void* const end = (BYTE*)alloc + bytesToAlign;
DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
"table phase - alignment initial allocation failed!");
ws->objectEnd = end;
ws->tableEnd = end;
ws->tableValidEnd = end;
}
}
ws->phase = phase;
ZSTD_cwksp_assert_internal_consistency(ws);
}
return 0;
}
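
The two alignment steps above can be followed with concrete numbers. A standalone worked example (the addresses are made up; the helper is re-implemented locally for illustration):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Local re-implementation of ZSTD_cwksp_bytes_to_align_ptr, for illustration only. */
static size_t bytes_to_align(uintptr_t addr, size_t alignBytes) {
    size_t const mask = alignBytes - 1;
    return (alignBytes - (addr & mask)) & mask;
}

int main(void) {
    uintptr_t const allocStart = 0x10A8;  /* hypothetical top-down cursor after the buffers phase */
    uintptr_t const objectEnd  = 0x1018;  /* hypothetical bottom-up cursor after the objects */

    /* Aligned section: consume 64 - bytes_to_align(...) bytes from the top, i.e. [1, 64]. */
    size_t const n1 = 64 - bytes_to_align(allocStart, 64);
    assert(n1 == 40 && (allocStart - n1) % 64 == 0);

    /* Table section: consume bytes_to_align(...) bytes from the bottom, i.e. [0, 63]. */
    size_t const n2 = bytes_to_align(objectEnd, 64);
    assert(n2 == 40 && (objectEnd + n2) % 64 == 0);

    /* On a freshly sized workspace n1 + n2 works out to exactly 64
     * (ZSTD_cwksp_slack_space_required()); with arbitrary reuse addresses, as here,
     * the total can land anywhere in [1, 127]. */
    return 0;
}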

/**
@@ -237,38 +323,25 @@ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
void* alloc;
void* bottom = ws->tableEnd;
ZSTD_cwksp_internal_advance_phase(ws, phase);
alloc = (BYTE *)ws->allocStart - bytes;

if (bytes == 0)
if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
return NULL;
}

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* over-reserve space */
alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
if (alloc < bottom) {
DEBUGLOG(4, "cwksp: alloc failed!");
ws->allocFailed = 1;
return NULL;
}
if (alloc < ws->tableValidEnd) {
ws->tableValidEnd = alloc;
}
ws->allocStart = alloc;
alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
 * either side. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
__asan_unpoison_memory_region(alloc, bytes);
if (alloc) {
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
__asan_unpoison_memory_region(alloc, bytes);
}
}
#endif
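
The redzone handling above is the usual over-reserve-and-offset pattern: grab bytes + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE, hand back a pointer just past the leading redzone, and make only the caller-visible span addressable (the cwksp poisons the rest of the arena elsewhere, e.g. on clear). A standalone sketch of the pattern, not the cwksp code; it only builds with -fsanitize=address:

#include <sanitizer/asan_interface.h>
#include <stddef.h>

#define REDZONE 128  /* mirrors ZSTD_CWKSP_ASAN_REDZONE_SIZE */

/* Carve `bytes` usable bytes out of a bump cursor, with poisoned redzones on both sides. */
static void* reserve_with_redzones(char** cursor, size_t bytes) {
    char* const raw = *cursor;                     /* assumes 2*REDZONE + bytes are available */
    *cursor = raw + 2 * REDZONE + bytes;           /* advance past the whole over-reservation */
    __asan_poison_memory_region(raw, REDZONE);                     /* leading redzone */
    __asan_poison_memory_region(raw + REDZONE + bytes, REDZONE);   /* trailing redzone */
    __asan_unpoison_memory_region(raw + REDZONE, bytes);           /* caller-visible span */
    return raw + REDZONE;                          /* out-of-bounds accesses now trip ASAN */
}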

@@ -283,28 +356,36 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
}

/**
* Reserves and returns memory sized on and aligned on sizeof(unsigned).
* Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
*/
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
assert((bytes & (sizeof(U32)-1)) == 0);
return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
ZSTD_cwksp_alloc_aligned);
assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
return ptr;
}

/**
* Aligned on sizeof(unsigned). These buffers have the special property that
* Aligned on 64 bytes. These buffers have the special property that
* their values remain constrained, allowing us to re-use them without
* memset()-ing them.
*/
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
void* alloc = ws->tableEnd;
void* end = (BYTE *)alloc + bytes;
void* top = ws->allocStart;
void* alloc;
void* end;
void* top;

if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
return NULL;
}
alloc = ws->tableEnd;
end = (BYTE *)alloc + bytes;
top = ws->allocStart;

DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
assert((bytes & (sizeof(U32)-1)) == 0);
ZSTD_cwksp_internal_advance_phase(ws, phase);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(end <= top);
if (end > top) {
Expand All @@ -320,6 +401,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
}
#endif

assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
return alloc;
}
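
The asserts added at the end of ZSTD_cwksp_reserve_table state the caller's side of the contract: the requested size must already be a multiple of 64 bytes. The match-state tables satisfy this by construction, as in this hedged sketch (not library code; assumes an initialized ws already in the aligned/tables phase):

/* Illustrative caller: a hash table of (1 << hashLog) U32 entries. With
 * hashLog >= ZSTD_HASHLOG_MIN (= 4), the byte size is a multiple of 64,
 * so the new size assert in ZSTD_cwksp_reserve_table holds. */
static U32* reserve_hash_table_sketch(ZSTD_cwksp* ws, U32 hashLog) {
    size_t const hSize = (size_t)1 << hashLog;
    return (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
}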

Expand Down Expand Up @@ -527,6 +610,24 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
* Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
* actual amount of space used.
*/
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
size_t const estimatedSpace, int resizedWorkspace) {
if (resizedWorkspace) {
/* Resized/newly allocated wksp should have exact bounds */
return ZSTD_cwksp_used(ws) == estimatedSpace;
} else {
/* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
* than estimatedSpace. See the comments in zstd_cwksp.h for details.
*/
return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
}
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}
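
The 63-byte tolerance in ZSTD_cwksp_estimated_space_within_bounds above follows from the slack accounting earlier in this header: the estimate charges a flat 64 bytes, while a reused workspace actually consumes n1 in [1, 64] plus n2 in [0, 63] bytes of padding, i.e. anywhere from 1 to 127 bytes, so the deviation from the estimate stays within plus or minus 63. A trivial check of the extremes (illustrative only):

#include <assert.h>

int main(void) {
    int const estimatedSlack = 64;   /* ZSTD_cwksp_slack_space_required() */
    int const minPadding = 1 + 0;    /* n1 = 1,  n2 = 0  */
    int const maxPadding = 64 + 63;  /* n1 = 64, n2 = 63 */
    assert(estimatedSlack - minPadding == 63);  /* at most 63 bytes under the estimate */
    assert(maxPadding - estimatedSlack == 63);  /* at most 63 bytes over the estimate  */
    return 0;
}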
