[CVE-2018-8354] Array guards needed for asmjs on x86
Penguinwizzard authored and MikeHolman committed Sep 11, 2018
1 parent e03b3e3 commit 5192cdc
Showing 1 changed file with 61 additions and 4 deletions.

lib/Backend/Lower.cpp: 61 additions & 4 deletions
@@ -9274,6 +9274,61 @@ void Lowerer::LowerLdLen(IR::Instr *const instr, const bool isHelper)
     LowerLdFld(instr, IR::HelperOp_GetProperty, IR::HelperOp_GetProperty, false, nullptr, isHelper);
 }
 
+IR::Instr* InsertMaskableMove(bool isStore, bool generateWriteBarrier, IR::Opnd* dst, IR::Opnd* src1, IR::Opnd* src2, IR::Opnd* indexOpnd, IR::Instr* insertBeforeInstr, Lowerer* lowerer)
+{
+    Assert(insertBeforeInstr->m_func->GetJITFunctionBody()->IsAsmJsMode());
+
+    // Mask with the bounds check operand to avoid speculation issues
+    const bool usesFastArray = insertBeforeInstr->m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer();
+    IR::RegOpnd* mask = nullptr;
+    bool shouldMaskResult = false;
+    if (!usesFastArray)
+    {
+        bool shouldMask = isStore ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore) : CONFIG_FLAG_RELEASE(PoisonTypedArrayLoad);
+        if (shouldMask && indexOpnd != nullptr)
+        {
+            // indices in asmjs fit in 32 bits, but we need a mask
+            IR::RegOpnd* temp = IR::RegOpnd::New(indexOpnd->GetType(), insertBeforeInstr->m_func);
+            lowerer->InsertMove(temp, indexOpnd, insertBeforeInstr, false);
+            lowerer->InsertAdd(false, temp, temp, IR::IntConstOpnd::New((uint32)src1->GetSize() - 1, temp->GetType(), insertBeforeInstr->m_func, true), insertBeforeInstr);
+
+            // For native ints and vars, we do the masking after the load; we don't do this for
+            // floats and doubles because the conversion to and from fp regs is slow.
+            shouldMaskResult = (!isStore) && IRType_IsNativeIntOrVar(src1->GetType()) && TySize[dst->GetType()] <= TySize[TyMachReg];
+
+            // When we do post-load masking, we AND the mask with dst, so they need to have the
+            // same type, as otherwise we'll hit asserts later on. When we do pre-load masking,
+            // we AND the mask with the index component of the indir opnd for the move from the
+            // array, so we need to align with that type instead.
+            mask = IR::RegOpnd::New((shouldMaskResult ? dst : indexOpnd)->GetType(), insertBeforeInstr->m_func);
+
+            if (temp->GetSize() != mask->GetSize())
+            {
+                Assert(mask->GetSize() == MachPtr);
+                Assert(src2->GetType() == TyUint32);
+                temp = temp->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd();
+                src2 = src2->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd();
+            }
+
+            lowerer->InsertSub(false, mask, temp, src2, insertBeforeInstr);
+            lowerer->InsertShift(Js::OpCode::Shr_A, false, mask, mask, IR::IntConstOpnd::New(TySize[mask->GetType()] * 8 - 1, TyInt8, insertBeforeInstr->m_func), insertBeforeInstr);
+
+            // If we're not masking the result, we're masking the index
+            if (!shouldMaskResult)
+            {
+                lowerer->InsertAnd(indexOpnd, indexOpnd, mask, insertBeforeInstr);
+            }
+        }
+    }
+    IR::Instr* ret = lowerer->InsertMove(dst, src1, insertBeforeInstr, generateWriteBarrier);
+    if(!usesFastArray && shouldMaskResult)
+    {
+        // Mask the result if we didn't use the mask earlier to mask the index
+        lowerer->InsertAnd(dst, dst, mask, insertBeforeInstr);
+    }
+    return ret;
+}
+
 IR::Instr *
 Lowerer::LowerLdArrViewElem(IR::Instr * instr)
 {
@@ -9342,7 +9397,8 @@ Lowerer::LowerLdArrViewElem(IR::Instr * instr)
         }
         done = instr;
     }
-    InsertMove(dst, src1, done);
+
+    InsertMaskableMove(false, true, dst, src1, src2, indexOpnd, done, this);
 
     instr->Remove();
     return instrPrev;
@@ -9390,7 +9446,8 @@ Lowerer::LowerLdArrViewElemWasm(IR::Instr * instr)
     Assert(!dst->IsFloat64() || src1->IsFloat64());
 
     IR::Instr * done = LowerWasmArrayBoundsCheck(instr, src1);
-    IR::Instr* newMove = InsertMove(dst, src1, done);
+
+    IR::Instr* newMove = InsertMaskableMove(false, true, dst, src1, instr->GetSrc2(), src1->AsIndirOpnd()->GetIndexOpnd(), done, this);
 
     if (m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer())
     {
@@ -9667,8 +9724,8 @@ Lowerer::LowerStArrViewElem(IR::Instr * instr)
             instr->FreeSrc2();
         }
     }
-    // wasm memory buffer is not recycler allocated, so we shouldn't generate write barrier
-    InsertMove(dst, src1, done, false);
+    // wasm memory buffer is not recycler allocated, so we shouldn't generate write barrier
+    InsertMaskableMove(true, false, dst, src1, src2, indexOpnd, done, this);
 
     instr->Remove();
     return instrPrev;
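
Editor's note on the mitigation (this note and the sketch below are not part of the commit): the pattern InsertMaskableMove lowers to is to compute an all-ones/all-zeros mask from the sign of (index + accessSize - 1) - length, then AND it into the loaded value for native-int loads, or into the index for stores and float/double accesses. Here is a minimal standalone C++ sketch of that pattern under stated assumptions; the names (BoundsMask, LoadUint32Masked, StoreUint32Masked) and the raw-pointer buffer are illustrative only, since the real change emits IR instructions (InsertAdd, InsertSub, InsertShift, InsertAnd) rather than executing C++ like this.

#include <cstdint>
#include <cstddef>
#include <cstring>

// Branchless bounds mask mirroring the ADD/SUB/SHR sequence the helper emits:
// all ones when [index, index + accessSize) fits inside the buffer, all zeros
// otherwise. Relies on an arithmetic right shift of the sign bit, as Shr_A does.
static inline uintptr_t BoundsMask(uintptr_t index, size_t accessSize, size_t length)
{
    uintptr_t lastByte = index + accessSize - 1;   // highest byte the access touches
    return (uintptr_t)((intptr_t)(lastByte - length) >> (sizeof(uintptr_t) * 8 - 1));
}

// Integer load path: mask the loaded value, so a speculative out-of-bounds
// read observes 0 instead of whatever sits past the buffer.
static inline uint32_t LoadUint32Masked(const uint8_t* buffer, size_t length, uintptr_t index)
{
    uintptr_t mask = BoundsMask(index, sizeof(uint32_t), length);
    uint32_t value;
    memcpy(&value, buffer + index, sizeof(value)); // the real bounds check still runs before this
    return value & (uint32_t)mask;
}

// Store path (and float/double accesses): mask the index instead, clamping a
// mispredicted out-of-bounds access to offset 0.
static inline void StoreUint32Masked(uint8_t* buffer, size_t length, uintptr_t index, uint32_t value)
{
    uintptr_t mask = BoundsMask(index, sizeof(uint32_t), length);
    memcpy(buffer + (index & mask), &value, sizeof(value)); // the real bounds check still runs before this
}

As in the diff, no mask is emitted when the function uses the WAsmJs fast virtual buffer (UsesWAsmJsFastVirtualBuffer()), and the poisoning is gated on the PoisonTypedArrayLoad / PoisonTypedArrayStore configuration flags.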