JIT: Prezero memory for RefPosition and Interval #103707

Closed
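
This change swaps the jitstd::list storage for Interval and RefPosition to a chunked list whose node memory is memset to zero before objects are placement-constructed, which is what allows the long member-initializer lists in both constructors to be deleted. Below is a minimal standalone sketch of that idea, using a hypothetical Widget type rather than anything from this PR: fields the constructor does not mention keep the zeroed bytes of the pre-zeroed storage.

#include <cstring>
#include <new>

struct Widget
{
    int   id;     // deliberately left out of the initializer list
    bool  active; // deliberately left out of the initializer list
    float weight;

    Widget(float w)
        : weight(w)
    {
        // id and active are not touched here; they keep whatever bytes the
        // underlying storage already holds.
    }
};

int main()
{
    // Pre-zero a buffer, then placement-construct on top of it -- the same
    // pattern PreZeroedList::Allocate uses with its chunked buffers.
    alignas(Widget) char storage[sizeof(Widget)];
    std::memset(storage, 0, sizeof(storage));

    Widget* w = new (storage) Widget(1.5f);

    // Strictly speaking the unwritten fields are indeterminate in standard
    // C++, but they read back as zero because the storage was memset first;
    // that is the property the pre-zeroed list relies on.
    return (w->id == 0 && !w->active) ? 0 : 1;
}
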
2 changes: 0 additions & 2 deletions src/coreclr/jit/lsra.cpp
@@ -799,9 +799,7 @@ LinearScanInterface* getLinearScanAllocator(Compiler* comp)
//
LinearScan::LinearScan(Compiler* theCompiler)
: compiler(theCompiler)
, intervals(theCompiler->getAllocator(CMK_LSRA_Interval))
, allocationPassComplete(false)
, refPositions(theCompiler->getAllocator(CMK_LSRA_RefPosition))
, killHead(nullptr)
, killTail(&killHead)
, listNodePool(theCompiler)
275 changes: 224 additions & 51 deletions src/coreclr/jit/lsra.h
@@ -443,10 +443,227 @@ inline bool RefTypeIsDef(RefType refType)

typedef regNumberSmall* VarToRegMap;

typedef jitstd::list<Interval> IntervalList;
typedef jitstd::list<RefPosition> RefPositionList;
typedef jitstd::list<RefPosition>::iterator RefPositionIterator;
typedef jitstd::list<RefPosition>::reverse_iterator RefPositionReverseIterator;
// Like normal jitstd::list, but guarantees that the memory is zeroed.
template <typename T, size_t ChunkSize, CompMemKind MemKind>
class PreZeroedList
{
struct Node
{
T Value;
Node* Prev;
Node* Next;

template <typename... Args>
Node(Args... args)
: Value(args...)
{
}
};

#ifdef DEBUG
size_t m_count = 0;
#endif

Node* m_head = nullptr;
Node* m_tail = nullptr;
Node** m_tailSlot = &m_head;

char* m_buffer = nullptr;
char* m_bufferEnd = nullptr;

public:
#ifdef DEBUG
size_t size()
{
return m_count;
}
#endif

template <typename... Args>
T* Allocate(Compiler* comp, Args... args)
{
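// Carve the new node out of the current pre-zeroed chunk, allocating and
// zeroing a fresh chunk of ChunkSize nodes when the current one is
// exhausted, then link the node at the tail of the list.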
if (m_buffer == m_bufferEnd)
{
m_buffer = new (comp, MemKind) char[ChunkSize * sizeof(Node)];
m_bufferEnd = m_buffer + ChunkSize * sizeof(Node);
memset(m_buffer, 0, ChunkSize * sizeof(Node));
}

Node* newNode = new (m_buffer, jitstd::placement_t()) Node(args...);
m_buffer += sizeof(Node);

INDEBUG(m_count++);

newNode->Prev = m_tail;
*m_tailSlot = m_tail = newNode;
m_tailSlot = &newNode->Next;

return &newNode->Value;
}

class iterator
{
friend class PreZeroedList;
Node* m_node;

explicit iterator(Node* node)
: m_node(node)
{
}

public:
iterator()
: m_node(nullptr)
{
}

iterator& operator++()
{
m_node = m_node->Next;
return *this;
}

iterator& operator++(int)
{
m_node = m_node->Next;
return *this;
}

iterator& operator--()
{
m_node = m_node->Prev;
return *this;
}

iterator& operator--(int)
{
m_node = m_node->Prev;
return *this;
}

bool operator==(const iterator& it)
{
return m_node == it.m_node;
}

bool operator!=(const iterator& it)
{
return m_node != it.m_node;
}

T& operator*()
{
return m_node->Value;
}

T* operator->()
{
return &m_node->Value;
}

operator T*()
{
return &m_node->Value;
}
};

class reverse_iterator
{
friend class PreZeroedList;
Node* m_node;

explicit reverse_iterator(Node* node)
: m_node(node)
{
}

public:
reverse_iterator()
: m_node(nullptr)
{
}

reverse_iterator& operator++()
{
m_node = m_node->Prev;
return *this;
}

reverse_iterator& operator++(int)
{
m_node = m_node->Prev;
return *this;
}

reverse_iterator& operator--()
{
m_node = m_node->Next;
return *this;
}

reverse_iterator& operator--(int)
{
m_node = m_node->Next;
return *this;
}

bool operator==(const reverse_iterator& it)
{
return m_node == it.m_node;
}

bool operator!=(const reverse_iterator& it)
{
return m_node != it.m_node;
}

T& operator*()
{
return m_node->Value;
}

T* operator->()
{
return &m_node->Value;
}

operator T*()
{
return &m_node->Value;
}
};

iterator begin()
{
return iterator(m_head);
}

iterator end()
{
return iterator(nullptr);
}

iterator backPosition()
{
return iterator(m_tail);
}

reverse_iterator rbegin()
{
return reverse_iterator(m_tail);
}

reverse_iterator rend()
{
return reverse_iterator(nullptr);
}
};

typedef PreZeroedList<Interval, 32, CMK_LSRA_Interval> IntervalList;

typedef PreZeroedList<RefPosition, 64, CMK_LSRA_RefPosition> RefPositionList;
typedef PreZeroedList<RefPosition, 64, CMK_LSRA_RefPosition>::iterator RefPositionIterator;
typedef PreZeroedList<RefPosition, 64, CMK_LSRA_RefPosition>::reverse_iterator RefPositionReverseIterator;
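
As a hypothetical usage sketch (not part of this diff, assuming the surrounding LSRA code), the new list types keep the existing iteration patterns working unchanged, including the implicit conversion from an iterator to a raw pointer:

unsigned lastUseCount = 0;
for (RefPosition& refPosition : refPositions)
{
    // Range-based iteration goes through begin()/end() and operator*().
    if (refPosition.lastUse)
    {
        lastUseCount++;
    }
}

for (RefPositionIterator it = refPositions.begin(); it != refPositions.end(); ++it)
{
    RefPosition* rp = it; // implicit conversion via iterator's operator T*()
    assert(rp == &*it);
}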

class Referenceable
{
@@ -2172,33 +2389,9 @@ class Interval : public Referenceable
Interval(RegisterType registerType, SingleTypeRegSet registerPreferences)
: Referenceable(registerType)
, registerPreferences(registerPreferences)
, registerAversion(RBM_NONE)
, relatedInterval(nullptr)
, assignedReg(nullptr)
, varNum(0)
, physReg(REG_COUNT)
, isActive(false)
, isLocalVar(false)
, isSplit(false)
, isSpilled(false)
, isInternal(false)
, isStructField(false)
, isPromotedStruct(false)
, hasConflictingDefUse(false)
, hasInterferingUses(false)
, isSpecialPutArg(false)
, preferCalleeSave(false)
, isConstant(false)
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
, isUpperVector(false)
, isPartiallySpilled(false)
#endif
, isWriteThru(false)
, isSingleDef(false)
#ifdef DEBUG
, intervalIndex(0)
#endif
{
// Intervals are allocated on top of zeroed memory, so all fields are already zero
}

#ifdef DEBUG
@@ -2595,36 +2788,16 @@ class RefPosition
LsraLocation nodeLocation,
GenTree* treeNode,
RefType refType DEBUG_ARG(GenTree* buildNode))
: referent(nullptr)
, nextRefPosition(nullptr)
, treeNode(treeNode)
, registerAssignment(RBM_NONE)
: treeNode(treeNode)
, bbNum(bbNum)
, nodeLocation(nodeLocation)
, refType(refType)
, multiRegIdx(0)
#ifdef TARGET_ARM64
, needsConsecutive(false)
, regCount(0)
#endif
, lastUse(false)
, reload(false)
, spillAfter(false)
, singleDefSpill(false)
, writeThru(false)
, copyReg(false)
, moveReg(false)
, isPhysRegRef(false)
, isFixedRegRef(false)
, isLocalDefUse(false)
, delayRegFree(false)
, outOfOrder(false)
#ifdef DEBUG
, minRegCandidateCount(1)
, rpNum(0)
, buildNode(buildNode)
#endif
{
// RefPositions are allocated on top of zeroed memory, so all fields are already zero
}

Interval* getInterval()
7 changes: 3 additions & 4 deletions src/coreclr/jit/lsrabuild.cpp
@@ -153,8 +153,7 @@ void RefInfoListNodePool::ReturnNode(RefInfoListNode* listNode)
//
Interval* LinearScan::newInterval(RegisterType theRegisterType)
{
intervals.emplace_back(theRegisterType, allRegs(theRegisterType));
Interval* newInt = &intervals.back();
Interval* newInt = intervals.Allocate(compiler, theRegisterType, allRegs(theRegisterType));

#ifdef DEBUG
newInt->intervalIndex = static_cast<unsigned>(intervals.size() - 1);
@@ -178,8 +177,8 @@ Interval* LinearScan::newInterval(RegisterType theRegisterType)
//
RefPosition* LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType)
{
refPositions.emplace_back(curBBNum, nodeLocation, treeNode, refType DEBUG_ARG(currBuildNode));
RefPosition* newRP = &refPositions.back();
RefPosition* newRP =
refPositions.Allocate(compiler, curBBNum, nodeLocation, treeNode, refType DEBUG_ARG(currBuildNode));
#ifdef DEBUG
// Reset currBuildNode so we do not set it for subsequent refpositions belonging
// to the same treeNode and hence, avoid printing it for every refposition inside