[MERGE #2959 @rajatd] 17-05 ChakraCore servicing release
Merge pull request #2959 from rajatd:release/1705

Fixes the following CVEs impacting ChakraCore:

CVE-2017-0229
CVE-2017-0223
CVE-2017-0224
CVE-2017-0252
CVE-2017-0230
CVE-2017-0234
CVE-2017-0235
CVE-2017-0236
CVE-2017-0228
CVE-2017-0238
CVE-2017-0266
rajatd committed May 12, 2017
2 parents 922263e + b3aeb30 commit 67f4df0
Showing 46 changed files with 775 additions and 132 deletions.
5 changes: 5 additions & 0 deletions lib/Backend/Func.cpp
@@ -322,6 +322,11 @@ Func::Codegen(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
workItem->GetJITFunctionBody()->GetProfileInfo()->DisableSwitchOpt();
outputData->disableSwitchOpt = TRUE;
}
else if (ex.Reason() == RejitReason::ArrayCheckHoistDisabled || ex.Reason() == RejitReason::ArrayAccessHelperCallEliminationDisabled)
{
workItem->GetJITFunctionBody()->GetProfileInfo()->DisableArrayCheckHoist(func.IsLoopBody());
outputData->disableArrayCheckHoist = TRUE;
}
else
{
Assert(ex.Reason() == RejitReason::TrackIntOverflowDisabled);
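The new `else if` above mirrors the existing rejit plumbing in `Func::Codegen`: when code generation throws a `RejitException` whose reason is `ArrayCheckHoistDisabled` or `ArrayAccessHelperCallEliminationDisabled`, the JIT-time profile records that array-check hoisting must stay off and the work item is compiled again without it. A minimal, self-contained sketch of that catch-disable-retry pattern (the `Profile`/`compileOnce` names are stand-ins, not ChakraCore APIs):

```cpp
#include <cstdio>
#include <exception>

// Hypothetical stand-ins for ChakraCore's rejit reasons and profile flags.
enum class RejitReason { ArrayCheckHoistDisabled, ArrayAccessHelperCallEliminationDisabled, TrackIntOverflowDisabled };

struct RejitException : std::exception
{
    RejitReason reason;
    explicit RejitException(RejitReason r) : reason(r) {}
};

struct Profile
{
    bool arrayCheckHoistDisabled = false;   // persisted across compile attempts
};

// One compile attempt; throws when an optimization invariant is violated.
void compileOnce(const Profile& profile)
{
    if (!profile.arrayCheckHoistDisabled)
    {
        // Pretend the optimizer discovered it must not hoist array checks here.
        throw RejitException(RejitReason::ArrayCheckHoistDisabled);
    }
    std::puts("compiled without array-check hoisting");
}

int main()
{
    Profile profile;
    for (int attempt = 0; attempt < 2; ++attempt)
    {
        try
        {
            compileOnce(profile);
            break;                                        // success
        }
        catch (const RejitException& ex)
        {
            if (ex.reason == RejitReason::ArrayCheckHoistDisabled ||
                ex.reason == RejitReason::ArrayAccessHelperCallEliminationDisabled)
            {
                profile.arrayCheckHoistDisabled = true;   // mirrors DisableArrayCheckHoist()
            }
            // fall through and recompile with the optimization disabled
        }
    }
}
```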
51 changes: 42 additions & 9 deletions lib/Backend/GlobOpt.cpp
@@ -16014,7 +16014,7 @@ GlobOpt::AttachBoundsCheckData(IR::Instr* instr, IR::Opnd* lowerBound, IR::Opnd*
instr->SetSrc2(upperBound);
if (offset != 0)
{
instr->SetDst(IR::IntConstOpnd::New(offset, TyInt32, instr->m_func, true));
instr->SetDst(IR::IntConstOpnd::New(offset, TyInt32, instr->m_func));
}
return instr;
}
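The repeated `IR::IntConstOpnd::New(..., true)` → `IR::IntConstOpnd::New(...)` edits in this file (and in IRBuilder.cpp and Lower.cpp below) drop the trailing `dontEncode` argument so it falls back to its default; judging by the `m_dontEncode` checks added in `LowererMD::ChangeToIMul` further down, that flag controls whether an immediate may be emitted verbatim into the generated code. As a rough, self-contained illustration of constant blinding in general — not ChakraCore's actual scheme; `ConstEncoder` and the XOR cookie are made up:

```cpp
#include <cstdint>
#include <cstdio>
#include <random>

// Illustrative model: a script-influenced 32-bit immediate can double as
// machine code if copied verbatim into a JIT buffer, so blind it with a
// per-compilation cookie and undo the blinding at run time.
struct ConstEncoder
{
    uint32_t cookie;

    ConstEncoder()
    {
        std::random_device rd;
        cookie = rd();
    }

    // dontEncode == true  -> the raw immediate lands in the code stream
    // dontEncode == false -> only (value ^ cookie) lands in the code stream
    uint32_t emit(uint32_t value, bool dontEncode) const
    {
        return dontEncode ? value : (value ^ cookie);
    }

    uint32_t decodeAtRuntime(uint32_t emitted) const { return emitted ^ cookie; }
};

int main()
{
    ConstEncoder enc;
    uint32_t userConstant = 0x90909090u;   // e.g. a value taken from script
    uint32_t raw     = enc.emit(userConstant, /*dontEncode*/ true);
    uint32_t blinded = enc.emit(userConstant, /*dontEncode*/ false);
    std::printf("raw: %08x  blinded: %08x  decoded: %08x\n",
                raw, blinded, enc.decodeAtRuntime(blinded));
}
```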
@@ -16338,9 +16338,37 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef)
)
)
{
eliminatedLowerBoundCheck = true;
eliminatedUpperBoundCheck = true;
canBailOutOnArrayAccessHelperCall = false;
// Unless we're in asm.js (where it is guaranteed that virtual typed array accesses cannot read/write beyond 4GB),
// check the range of the index to make sure we won't access beyond the reserved memory before eliminating bounds
// checks in jitted code.
if (!GetIsAsmJSFunc())
{
IR::RegOpnd * idxOpnd = baseOwnerIndir->GetIndexOpnd();
if (idxOpnd)
{
StackSym * idxSym = idxOpnd->m_sym->IsTypeSpec() ? idxOpnd->m_sym->GetVarEquivSym(nullptr) : idxOpnd->m_sym;
Value * idxValue = FindValue(idxSym);
IntConstantBounds idxConstantBounds;
if (idxValue && idxValue->GetValueInfo()->TryGetIntConstantBounds(&idxConstantBounds))
{
BYTE indirScale = Lowerer::GetArrayIndirScale(baseValueType);
int32 upperBound = idxConstantBounds.UpperBound();
int32 lowerBound = idxConstantBounds.LowerBound();
if (lowerBound >= 0 && ((static_cast<uint64>(upperBound) << indirScale) < MAX_ASMJS_ARRAYBUFFER_LENGTH))
{
eliminatedLowerBoundCheck = true;
eliminatedUpperBoundCheck = true;
canBailOutOnArrayAccessHelperCall = false;
}
}
}
}
else
{
eliminatedLowerBoundCheck = true;
eliminatedUpperBoundCheck = true;
canBailOutOnArrayAccessHelperCall = false;
}
}
}
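The rewritten block only eliminates the lower and upper bounds checks for a non-asm.js virtual typed array access when the index value's constant bounds prove that the scaled access stays inside the reserved region. A self-contained sketch of that guard (`MAX_LEN` standing in for `MAX_ASMJS_ARRAYBUFFER_LENGTH`, assumed here to be the 4GB reservation the comment refers to):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for MAX_ASMJS_ARRAYBUFFER_LENGTH (4GB reservation).
constexpr uint64_t MAX_LEN = 0x100000000ull;

// Mirrors the new GlobOpt condition: the index must be provably non-negative
// and the byte offset (index << indirScale) must stay below the reservation.
bool canEliminateBoundsChecks(int32_t lowerBound, int32_t upperBound, unsigned indirScale)
{
    return lowerBound >= 0 &&
           (static_cast<uint64_t>(upperBound) << indirScale) < MAX_LEN;
}

int main()
{
    // Example: an 8-byte-element array would use indirScale 3.
    std::printf("%d\n", canEliminateBoundsChecks(0, 1000, 3));        // 1: provably inside the reservation
    std::printf("%d\n", canEliminateBoundsChecks(-1, 1000, 3));       // 0: index could be negative
    std::printf("%d\n", canEliminateBoundsChecks(0, 0x7fffffff, 3));  // 0: scaled offset exceeds 4GB
}
```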

@@ -17307,8 +17335,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef)
: IR::IntConstOpnd::New(
hoistInfo.IndexConstantBounds().LowerBound(),
TyInt32,
instr->m_func,
true);
instr->m_func);
lowerBound->SetIsJITOptimizedReg(true);
IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func);
upperBound->SetIsJITOptimizedReg(true);
@@ -17456,7 +17483,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef)
{
IR::Opnd* lowerBound = baseOwnerIndir->GetIndexOpnd()
? static_cast<IR::Opnd *>(baseOwnerIndir->GetIndexOpnd())
: IR::IntConstOpnd::New(baseOwnerIndir->GetOffset(), TyInt32, instr->m_func, true);
: IR::IntConstOpnd::New(baseOwnerIndir->GetOffset(), TyInt32, instr->m_func);
lowerBound->SetIsJITOptimizedReg(true);
IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func);
upperBound->SetIsJITOptimizedReg(true);
@@ -20053,6 +20080,12 @@ GlobOpt::DoArrayLengthHoist() const
return doArrayLengthHoist;
}

bool
GlobOpt::DoEliminateArrayAccessHelperCall(Func *const func)
{
return DoArrayCheckHoist(func);
}

bool
GlobOpt::DoEliminateArrayAccessHelperCall() const
{
@@ -21400,7 +21433,7 @@ GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::In
{
sizeOpnd = IR::RegOpnd::New(TyUint32, this->func);

IR::Opnd *unrollOpnd = IR::IntConstOpnd::New(unroll, type, localFunc, true);
IR::Opnd *unrollOpnd = IR::IntConstOpnd::New(unroll, type, localFunc);

InsertInstr(IR::Instr::New(Js::OpCode::Mul_I4,
sizeOpnd,
@@ -21413,7 +21446,7 @@
else
{
uint size = (loopCount->LoopCountMinusOneConstantValue() + 1) * unroll;
sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc, true);
sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc);
}
loop->memOpInfo->inductionVariableOpndPerUnrollMap->Add(unroll, sizeOpnd);
return sizeOpnd;
1 change: 1 addition & 0 deletions lib/Backend/GlobOpt.h
@@ -1616,6 +1616,7 @@ class GlobOpt
static bool DoArrayMissingValueCheckHoist(Func *const func);
static bool DoArraySegmentHoist(const ValueType baseValueType, Func *const func);
static bool DoArrayLengthHoist(Func *const func);
static bool DoEliminateArrayAccessHelperCall(Func* func);
static bool DoTypedArrayTypeSpec(Func* func);
static bool DoNativeArrayTypeSpec(Func* func);
static bool IsSwitchOptEnabled(Func* func);
4 changes: 2 additions & 2 deletions lib/Backend/IRBuilder.cpp
@@ -4879,14 +4879,14 @@ IRBuilder::BuildAuxiliary(Js::OpCode newOpcode, uint32 offset)
// The property ID array needs to be both relocatable and available (so we can
// get the slot capacity), so we need to just pass the offset to lower and let
// lower take it from there...
srcOpnd = IR::IntConstOpnd::New(auxInsn->Offset, TyUint32, m_func, true);
srcOpnd = IR::IntConstOpnd::New(auxInsn->Offset, TyUint32, m_func);
dstOpnd = this->BuildDstOpnd(dstRegSlot);
dstOpnd->SetValueType(ValueType::GetObject(ObjectType::UninitializedObject));
instr = IR::Instr::New(newOpcode, dstOpnd, srcOpnd, m_func);

// Because we're going to be making decisions based off the value, we have to defer
// this until we get to lowering.
instr->SetSrc2(IR::IntConstOpnd::New(literalObjectId, TyUint32, m_func, true));
instr->SetSrc2(IR::IntConstOpnd::New(literalObjectId, TyUint32, m_func));

if (dstOpnd->m_sym->m_isSingleDef)
{
6 changes: 6 additions & 0 deletions lib/Backend/JITTimeProfileInfo.cpp
@@ -143,6 +143,12 @@ JITTimeProfileInfo::DisableAggressiveIntTypeSpec(bool isLoopBody)
m_profileData.flags |= isLoopBody ? Flags_disableAggressiveIntTypeSpec_jitLoopBody : Flags_disableAggressiveIntTypeSpec;
}

void
JITTimeProfileInfo::DisableArrayCheckHoist(bool isLoopBody)
{
m_profileData.flags |= isLoopBody ? Flags_disableArrayCheckHoist_jitLoopBody : Flags_disableArrayCheckHoist;
}

void
JITTimeProfileInfo::DisableStackArgOpt()
{
1 change: 1 addition & 0 deletions lib/Backend/JITTimeProfileInfo.h
@@ -40,6 +40,7 @@ class JITTimeProfileInfo
void DisableStackArgOpt();
void DisableSwitchOpt();
void DisableTrackCompoundedIntOverflow();
void DisableArrayCheckHoist(bool isLoopBody);

bool IsModulusOpByPowerOf2(Js::ProfileId profileId) const;
bool IsAggressiveIntTypeSpecDisabled(const bool isJitLoopBody) const;
14 changes: 8 additions & 6 deletions lib/Backend/Lower.cpp
@@ -12858,7 +12858,7 @@ void Lowerer::LowerBoundCheck(IR::Instr *const instr)
true,
addResultOpnd,
rightOpnd,
offsetOpnd ? offsetOpnd->UseWithNewType(TyInt32, func) : IR::IntConstOpnd::New(offset, TyInt32, func, true),
offsetOpnd ? offsetOpnd->UseWithNewType(TyInt32, func) : IR::IntConstOpnd::New(offset, TyInt32, func),
insertBeforeInstr);
InsertBranch(LowererMD::MDOverflowBranchOpcode, bailOutLabel, insertBeforeInstr);

@@ -12870,7 +12870,7 @@
// $bailOut:
if(!rightOpnd)
{
rightOpnd = IR::IntConstOpnd::New(offset, TyInt32, func, true);
rightOpnd = IR::IntConstOpnd::New(offset, TyInt32, func);
}
InsertCompareBranch(leftOpnd, rightOpnd, compareOpCode, doUnsignedCompare, skipBailOutLabel, insertBeforeInstr);
}
@@ -16874,16 +16874,18 @@ Lowerer::GenerateFastStElemI(IR::Instr *& stElem, bool *instrIsInHelperBlockRef)
const IR::AutoReuseOpnd autoReuseReg(reg, m_func);
InsertMove(reg, src, stElem);

bool bailOutOnHelperCall = stElem->HasBailOutInfo() && (stElem->GetBailOutKind() & IR::BailOutOnArrayAccessHelperCall);

// Convert to float, and assign to indirOpnd
if (baseValueType.IsLikelyOptimizedVirtualTypedArray())
{
IR::RegOpnd* dstReg = IR::RegOpnd::New(indirOpnd->GetType(), this->m_func);
m_lowererMD.EmitLoadFloat(dstReg, reg, stElem);
m_lowererMD.EmitLoadFloat(dstReg, reg, stElem, bailOutOnHelperCall);
InsertMove(indirOpnd, dstReg, stElem);
}
else
{
m_lowererMD.EmitLoadFloat(indirOpnd, reg, stElem);
m_lowererMD.EmitLoadFloat(indirOpnd, reg, stElem, bailOutOnHelperCall);
}

}
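`GenerateFastStElemI` now derives `bailOutOnHelperCall` from the store's bail-out info and threads it into `EmitLoadFloat`, so the float store path knows whether falling back to a helper call is allowed. A minimal model of how that flag is read out of a bail-out kind bitmask (the enum values here are illustrative, not ChakraCore's):

```cpp
#include <cstdio>

// Illustrative bail-out kind bitmask; the real IR::BailOutKind has many more bits.
enum BailOutKind : unsigned
{
    BailOutInvalid                 = 0,
    BailOutOnArrayAccessHelperCall = 1u << 0,
};

struct Instr
{
    bool hasBailOutInfo = false;
    unsigned bailOutKind = BailOutInvalid;

    bool HasBailOutInfo() const { return hasBailOutInfo; }
    unsigned GetBailOutKind() const { return bailOutKind; }
};

int main()
{
    Instr stElem;
    stElem.hasBailOutInfo = true;
    stElem.bailOutKind = BailOutOnArrayAccessHelperCall;

    // Mirrors the new Lower.cpp line: true only when bail-out info exists and
    // the "no helper call on array access" bit is set.
    bool bailOutOnHelperCall =
        stElem.HasBailOutInfo() && (stElem.GetBailOutKind() & BailOutOnArrayAccessHelperCall);

    std::printf("bailOutOnHelperCall = %d\n", bailOutOnHelperCall);
}
```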
@@ -20569,7 +20571,7 @@ bool Lowerer::GenerateFastEqBoolInt(IR::Instr * instr, bool *pNeedHelper)
// If it's not zero, then it's either 1, in which case it's true, or it's something else, in which
// case the two will compare as inequal
InsertCompareBranch(
IR::IntConstOpnd::New((((IntConstType)1) << Js::VarTag_Shift) + Js::AtomTag, IRType::TyVar, this->m_func),
IR::IntConstOpnd::New((((IntConstType)1) << Js::VarTag_Shift) + Js::AtomTag, IRType::TyVar, this->m_func, true),
srcInt->AsRegOpnd(),
Js::OpCode::BrNeq_A,
isBranchNotCompare ? inequalResultTarget : forceInequal, // in the case of branching, we can go straight to the inequal target; for compares, we need to load the value
@@ -23889,7 +23891,7 @@ void Lowerer::GenerateSingleCharStrJumpTableLookup(IR::Instr * instr)

// CMP charOpnd, lastCaseIndex - baseCaseIndex
// JA defaultLabel
InsertCompareBranch(charOpnd, IR::IntConstOpnd::New(multiBrInstr->m_lastCaseValue - multiBrInstr->m_baseCaseValue, TyUint32, func, true),
InsertCompareBranch(charOpnd, IR::IntConstOpnd::New(multiBrInstr->m_lastCaseValue - multiBrInstr->m_baseCaseValue, TyUint32, func),
Js::OpCode::BrGt_A, true, defaultLabelInstr, instr);

instr->UnlinkSrc1();
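The `CMP`/`JA` pair in the comment above is the usual unsigned range guard in front of a jump table: after subtracting the lowest case value, anything outside `[baseCaseValue, lastCaseValue]` wraps around to a large unsigned number, so a single unsigned compare routes it to the default label. A self-contained sketch of the same dispatch shape (table contents hypothetical):

```cpp
#include <cstdio>

// Dispatch a single-character switch through a jump table guarded by one
// unsigned compare -- the same shape the lowerer emits as
//   CMP charOpnd, lastCaseValue - baseCaseValue
//   JA  defaultLabel
int dispatch(char c)
{
    constexpr unsigned base = 'a';   // stands in for m_baseCaseValue
    constexpr unsigned last = 'd';   // stands in for m_lastCaseValue
    static int (*const table[])() = {
        [] { return 1; },   // 'a'
        [] { return 2; },   // 'b'
        [] { return 3; },   // 'c'
        [] { return 4; },   // 'd'
    };

    unsigned biased = static_cast<unsigned char>(c) - base;
    if (biased > last - base)        // out-of-range values wrap to a huge unsigned number
    {
        return 0;                    // default label
    }
    return table[biased]();
}

int main()
{
    std::printf("%d %d %d\n", dispatch('b'), dispatch('z'), dispatch('`'));  // 2 0 0
}
```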
50 changes: 44 additions & 6 deletions lib/Backend/LowerMDShared.cpp
@@ -1257,7 +1257,7 @@ void LowererMD::ChangeToShift(IR::Instr *const instr, const bool needFlags)
}
}

void LowererMD::ChangeToMul(IR::Instr *const instr, bool hasOverflowCheck)
void LowererMD::ChangeToIMul(IR::Instr *const instr, bool hasOverflowCheck)
{
// If non-32 bit overflow check is needed, we have to use the IMUL form.
if (hasOverflowCheck && !instr->ShouldCheckFor32BitOverflow() && instr->ShouldCheckForNon32BitOverflow())
@@ -1272,8 +1272,29 @@
{
// MOV reg, imm
temp2 = IR::RegOpnd::New(TyInt32, instr->m_func);

IR::Opnd * src2 = instr->GetSrc2();
bool dontEncode = false;
if (src2->IsHelperCallOpnd())
{
dontEncode = true;
}
else if (src2->IsIntConstOpnd() || src2->IsAddrOpnd())
{
dontEncode = src2->IsIntConstOpnd() ? src2->AsIntConstOpnd()->m_dontEncode : src2->AsAddrOpnd()->m_dontEncode;
}
else if (src2->IsInt64ConstOpnd())
{
dontEncode = false;
}
else
{
AssertMsg(false, "Unexpected immediate opnd");
throw Js::OperationAbortedException();
}

instr->InsertBefore(IR::Instr::New(Js::OpCode::MOV, temp2,
IR::IntConstOpnd::New((IntConstType)instr->GetSrc2()->GetImmediateValue(instr->m_func), TyInt32, instr->m_func, true),
IR::IntConstOpnd::New((IntConstType)instr->GetSrc2()->GetImmediateValue(instr->m_func), TyInt32, instr->m_func, dontEncode),
instr->m_func));
}
// eax = IMUL eax, reg
@@ -2061,7 +2082,7 @@ void LowererMD::LegalizeSrc(IR::Instr *const instr, IR::Opnd *src, const uint fo
if (!instr->isInlineeEntryInstr)
{
Assert(forms & L_Reg);
IR::IntConstOpnd * newIntOpnd = IR::IntConstOpnd::New(intOpnd->GetValue(), intOpnd->GetType(), instr->m_func, true);
IR::IntConstOpnd * newIntOpnd = intOpnd->Copy(instr->m_func)->AsIntConstOpnd();
IR::IndirOpnd * indirOpnd = instr->m_func->GetTopFunc()->GetConstantAddressIndirOpnd(intOpnd->GetValue(), newIntOpnd, IR::AddrOpndKindConstantAddress, TyMachPtr, Js::OpCode::MOV);
if (HoistLargeConstant(indirOpnd, src, instr))
{
@@ -2125,7 +2146,7 @@ void LowererMD::LegalizeSrc(IR::Instr *const instr, IR::Opnd *src, const uint fo
Assert(!instr->isInlineeEntryInstr);
Assert(forms & L_Reg);
// TODO: michhol, remove cast after making m_address intptr
IR::AddrOpnd * newAddrOpnd = IR::AddrOpnd::New(addrOpnd->m_address, addrOpnd->GetAddrOpndKind(), instr->m_func, true);
IR::AddrOpnd * newAddrOpnd = addrOpnd->Copy(instr->m_func)->AsAddrOpnd();
IR::IndirOpnd * indirOpnd = instr->m_func->GetTopFunc()->GetConstantAddressIndirOpnd((intptr_t)addrOpnd->m_address, newAddrOpnd, addrOpnd->GetAddrOpndKind(), TyMachPtr, Js::OpCode::MOV);
if (HoistLargeConstant(indirOpnd, src, instr))
{
@@ -6645,7 +6666,7 @@ LowererMD::EmitLoadFloatCommon(IR::Opnd *dst, IR::Opnd *src, IR::Instr *insertIn
}

IR::RegOpnd *
LowererMD::EmitLoadFloat(IR::Opnd *dst, IR::Opnd *src, IR::Instr *insertInstr)
LowererMD::EmitLoadFloat(IR::Opnd *dst, IR::Opnd *src, IR::Instr *insertInstr, bool bailOutOnHelperCall)
{
IR::LabelInstr *labelDone;
IR::Instr *instr;
@@ -6657,6 +6678,23 @@
return nullptr;
}

if (bailOutOnHelperCall)
{
if(!GlobOpt::DoEliminateArrayAccessHelperCall(this->m_func))
{
// Array access helper call removal is already off for some reason. Prevent trying to rejit again
// because it won't help and the same thing will happen again. Just abort jitting this function.
if(PHASE_TRACE(Js::BailOutPhase, this->m_func))
{
Output::Print(_u(" Aborting JIT because EliminateArrayAccessHelperCall is already off\n"));
Output::Flush();
}
throw Js::OperationAbortedException();
}

throw Js::RejitException(RejitReason::ArrayAccessHelperCallEliminationDisabled);
}

IR::Opnd *memAddress = dst;

if (dst->IsRegOpnd())
@@ -7219,7 +7257,7 @@
// Lower the instruction
if (!simplifiedMul)
{
LowererMD::ChangeToMul(instr, needsOverflowCheck);
LowererMD::ChangeToIMul(instr, needsOverflowCheck);
}

const auto insertBeforeInstr = checkForNegativeZeroLabel ? checkForNegativeZeroLabel : bailOutLabel;
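The new `bailOutOnHelperCall` path in `EmitLoadFloat` refuses to emit the helper call when the caller asked for a bail-out instead: it either throws a rejit exception (recompile with `ArrayAccessHelperCallEliminationDisabled`) or, if that optimization is already off, aborts jitting the function to avoid a rejit loop. A compact sketch of that decision, with hypothetical names in place of the GlobOpt/Func plumbing:

```cpp
#include <cstdio>
#include <stdexcept>

struct RejitException   : std::runtime_error { using std::runtime_error::runtime_error; };
struct OperationAborted : std::runtime_error { using std::runtime_error::runtime_error; };

// Stand-in for GlobOpt::DoEliminateArrayAccessHelperCall(func): whether the
// "no helper call on array access" optimization is still enabled for this function.
bool eliminateHelperCallEnabled = true;

// Mirrors the new EmitLoadFloat guard: a helper call is not allowed here, so
// choose between "recompile without the optimization" and "give up on the JIT".
void onHelperCallNotAllowed()
{
    if (!eliminateHelperCallEnabled)
    {
        // The optimization is already off; rejitting would just loop. Abort.
        throw OperationAborted("EliminateArrayAccessHelperCall already off");
    }
    throw RejitException("ArrayAccessHelperCallEliminationDisabled");
}

void attempt(bool enabled)
{
    eliminateHelperCallEnabled = enabled;
    try { onHelperCallNotAllowed(); }
    catch (const RejitException& e)   { std::printf("rejit: %s\n", e.what()); }
    catch (const OperationAborted& e) { std::printf("abort: %s\n", e.what()); }
}

int main()
{
    attempt(true);    // optimization still on  -> request a rejit without it
    attempt(false);   // already off            -> abort jitting this function
}
```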
4 changes: 2 additions & 2 deletions lib/Backend/LowerMDShared.h
@@ -50,7 +50,7 @@ class LowererMD
static void ChangeToAdd(IR::Instr *const instr, const bool needFlags);
static void ChangeToSub(IR::Instr *const instr, const bool needFlags);
static void ChangeToShift(IR::Instr *const instr, const bool needFlags);
static void ChangeToMul(IR::Instr *const instr, const bool hasOverflowCheck = false);
static void ChangeToIMul(IR::Instr *const instr, const bool hasOverflowCheck = false);
static const uint16 GetFormalParamOffset();
static const Js::OpCode MDUncondBranchOpcode;
static const Js::OpCode MDExtend32Opcode;
@@ -222,7 +222,7 @@ class LowererMD
static IR::Instr *InsertConvertFloat64ToInt32(const RoundMode roundMode, IR::Opnd *const dst, IR::Opnd *const src, IR::Instr *const insertBeforeInstr);
void ConvertFloatToInt32(IR::Opnd* intOpnd, IR::Opnd* floatOpnd, IR::LabelInstr * labelHelper, IR::LabelInstr * labelDone, IR::Instr * instInsert);
void EmitLoadFloatFromNumber(IR::Opnd *dst, IR::Opnd *src, IR::Instr *insertInstr);
IR::RegOpnd * EmitLoadFloat(IR::Opnd *dst, IR::Opnd *src, IR::Instr *insertInstr);
IR::RegOpnd * EmitLoadFloat(IR::Opnd *dst, IR::Opnd *src, IR::Instr *insertInstr, bool bailOutOnHelperCall = false);
static void EmitNon32BitOvfCheck(IR::Instr *instr, IR::Instr *insertInstr, IR::LabelInstr* bailOutLabel);

static void LowerInt4NegWithBailOut(IR::Instr *const instr, const IR::BailOutKind bailOutKind, IR::LabelInstr *const bailOutLabel, IR::LabelInstr *const skipBailOutLabel);
4 changes: 4 additions & 0 deletions lib/Backend/NativeCodeGenerator.cpp
@@ -1135,6 +1135,10 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor

if (body->HasDynamicProfileInfo())
{
if (jitWriteData.disableArrayCheckHoist)
{
body->GetAnyDynamicProfileInfo()->DisableArrayCheckHoist(workItem->Type() == JsLoopBodyWorkItemType);
}
if (jitWriteData.disableAggressiveIntTypeSpec)
{
body->GetAnyDynamicProfileInfo()->DisableAggressiveIntTypeSpec(workItem->Type() == JsLoopBodyWorkItemType);
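Back on the script side, `NativeCodeGenerator::CodeGen` copies the new `disableArrayCheckHoist` bit from the JIT's output data into the function body's dynamic profile, distinguishing loop-body work items from whole-function ones. A minimal model of that hand-off (field and type names hypothetical):

```cpp
#include <cstdio>

// Hypothetical shapes of the JIT output block and the persistent profile.
struct JitWriteData
{
    bool disableArrayCheckHoist = false;
};

struct DynamicProfileInfo
{
    bool arrayCheckHoistDisabled = false;
    bool arrayCheckHoistDisabledForLoopBody = false;

    void DisableArrayCheckHoist(bool isLoopBody)
    {
        (isLoopBody ? arrayCheckHoistDisabledForLoopBody : arrayCheckHoistDisabled) = true;
    }
};

// Mirrors the new NativeCodeGenerator::CodeGen block.
void applyJitFeedback(const JitWriteData& jitWriteData, DynamicProfileInfo& profile, bool isLoopBodyWorkItem)
{
    if (jitWriteData.disableArrayCheckHoist)
    {
        profile.DisableArrayCheckHoist(isLoopBodyWorkItem);
    }
}

int main()
{
    JitWriteData data;
    data.disableArrayCheckHoist = true;

    DynamicProfileInfo profile;
    applyJitFeedback(data, profile, /*isLoopBodyWorkItem*/ false);
    std::printf("whole-function hoist disabled: %d\n", profile.arrayCheckHoistDisabled);
}
```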
8 changes: 4 additions & 4 deletions lib/Backend/amd64/LowererMDArch.cpp
@@ -2748,19 +2748,19 @@ LowererMDArch::EmitLoadInt32(IR::Instr *instrLoad, bool conversionFromObjectAllo
// Known to be non-integer. If we are required to bail out on helper call, just re-jit.
if (!doFloatToIntFastPath && bailOutOnHelper)
{
if(!GlobOpt::DoAggressiveIntTypeSpec(this->m_func))
if(!GlobOpt::DoEliminateArrayAccessHelperCall(this->m_func))
{
// Aggressive int type specialization is already off for some reason. Prevent trying to rejit again
// Array access helper call removal is already off for some reason. Prevent trying to rejit again
// because it won't help and the same thing will happen again. Just abort jitting this function.
if(PHASE_TRACE(Js::BailOutPhase, this->m_func))
{
Output::Print(_u(" Aborting JIT because AggressiveIntTypeSpec is already off\n"));
Output::Print(_u(" Aborting JIT because EliminateArrayAccessHelperCall is already off\n"));
Output::Flush();
}
throw Js::OperationAbortedException();
}

throw Js::RejitException(RejitReason::AggressiveIntTypeSpecDisabled);
throw Js::RejitException(RejitReason::ArrayAccessHelperCallEliminationDisabled);
}
}
else
(Remaining changed files not shown.)
