// ========== Copyright Header Begin ==========================================
//
// OpenSPARC T2 Processor File: MemoryAccessBuffer.cc
// Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
//
// The above named program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public
// License version 2 as published by the Free Software Foundation.
//
// The above named program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this work; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
//
// ========== Copyright Header End ============================================
/************************************************************************
**
** Copyright (C) 2002, Sun Microsystems, Inc.
**
** Sun considers its source code as an unpublished, proprietary
** trade secret and it is available only under strict license provisions.
** This copyright notice is placed here only to protect Sun in the event
** the source is deemed a published work. Disassembly, decompilation,
** or other means of reducing the object code to human readable form
** is prohibited by the license agreement under which this code is
** provided to the user or company in possession of this copy."
**
*************************************************************************/
#include "MemoryAccessBuffer.h"
#include <sstream>
using namespace std;
////////////////////////////////////////////////
MemoryAccessBuffer::MemoryAccessBuffer()
{
}
////////////////////////////////////////////////
MemoryAccessBuffer::MemoryAccessBuffer( const MemoryAccessBuffer & orig )
{
// Replace the following line with your function body.
// RIESLING_THROW_DOMAIN_ERROR( "Unimplemented function." );
}
////////////////////////////////////////////////
MemoryAccessBuffer::~MemoryAccessBuffer()
{
}
////////////////////////////////////////////////
const MemoryAccessBuffer &
MemoryAccessBuffer::operator=( const MemoryAccessBuffer & rhs )
{
// Replace the following line with your function body.
// RIESLING_THROW_DOMAIN_ERROR( "Unimplemented function." );
return *this;
}
////////////////////////////////////////////////
bool
MemoryAccessBuffer::operator==( const MemoryAccessBuffer & rhs ) const
{
// Replace the following line with your function body.
// RIESLING_THROW_DOMAIN_ERROR( "Unimplemented function." );
return false;
}
////////////////////////////////////////////////
string
MemoryAccessBuffer::toString() const
{
ostringstream os;
list<MemoryAccessEntry>::const_iterator ii;
os << "MemoryAccessBuffer (size=" << buf_.size() << ")" << endl;
for (ii = buf_.begin(); ii != buf_.end(); ii++) {
os << ii->toString();
}
os << endl;
return os.str();
}
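////////////////////////////////////////////////
// Append an entry to the buffer; the returned iterator points at the stored
// copy so the caller can keep referring to (and linking) the entry later.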
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::pushBack(MemoryAccessEntry& entry)
{
list<MemoryAccessEntry>::iterator ii;
buf_.push_back(entry);
ii = buf_.end();
MSYNC_DEBUG(2, "MAB Pushback\n%s", entry.toString().c_str());
MSYNC_DEBUG(4, "%s", toString().c_str());
return (--ii);
}
/* STORE_COMMIT cannot be removed until all of the associated STORE_ACK,
   STORE_INV, and STORE_UPDATE entries have completed.
   Consider the following sequence:
     STORE_COMMIT -> LOAD_DATA (L1 hit)
   The LOAD_DATA should get the old data from memory. If the STORE_COMMIT
   data were already written to memory, the LOAD_DATA would get the new data,
   which is wrong.
   (STORE_INV is probably not needed in my algorithm; think it through more
   before removing this requirement.)
*/
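/* popFront() retires executed (or popped) entries from the head of the MAB.
   It stops at the first entry that still has to wait: a STORE_COMMIT or EVICT
   with pending invalidations, or a LOAD_DATA/FETCH_DATA from L2/memory that
   is cached in L1 but does not yet have its LoadFill/FetchFill link.
   Completed STORE_COMMITs that have not been written back yet are appended
   to *wdbuf so the caller can write their data to memory. */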
void
MemoryAccessBuffer::popFront(vector<MemoryAccessEntry>* wdbuf, Tso::TsoChecker* tsoChecker)
{
list<MemoryAccessEntry>::iterator ii;
ii = buf_.begin();
while (ii != buf_.end() && (ii->isExecuted() || ii->isPopped())) {
if (ii->getEntryType() == MEM_STORE_COMMIT) {
if (!ii->isInvDone()) {
MSYNC_DEBUG(2, "popFront: MEM_STORE_COMMIT wait for invalidation, ii=%s", ii->toString().c_str());
break; // must wait until all invalidations are done
}
// list<MemoryAccessEntry>::iterator it, it1;
// it = ii->getLink();
// while (it != ii) { // note that the front() is not removed
// it1 = it->getLink();
// it->setExecuted(true);
// /* Set executed bit rather than remove them now to let them stay at MAB
// until they reach to the head.
// The main concern is the STORE_UPDATE record which is required for L1
// hit. There may be some problem if remove too early.
// */
// // buf_.erase(it);
// it = it1;
// }
if (ii->isLink2Valid()) {
ii->getLink2()->setAcked(true);
}
if (!ii->isPopped() && !ii->isWriteBack()) {
wdbuf->push_back(*ii); // write data to memory
}
}
if (ii->getEntryType() == MEM_EVICT) {
if (!ii->isInvDone()) {
MSYNC_DEBUG(2, "popFront: MEM_EVICT wait for invalidation, ii=%s", ii->toString().c_str());
break; // must wait until all invalidations are done
}
}
/* Note that N2 guarantees that the LoadFill occurs before the STEP that sets
   the executed_ flag; hence, N2 always sets cacheL1_ to false. Therefore, in
   N2 the LoadData can be removed as soon as it reaches the head of the MAB
   and is executed. */
if (ii->getEntryType() == MEM_LOAD_DATA) {
if (ii->getDsrc() == DSRC_L2_MEMORY && ii->isCacheL1()) {
if (!ii->isLinkValid()) {
MSYNC_DEBUG(2, "popFront: MEM_LOAD_DATA wait for LoadFill, ii=%s", ii->toString().c_str());
break; // must wait until LoadFill
}
}
}
if (ii->getEntryType() == MEM_FETCH_DATA) {
if (ii->getDsrc() == DSRC_L2_MEMORY && ii->isCacheL1()) {
if (!ii->isLinkValid()) {
MSYNC_DEBUG(2, "popFront: MEM_FETCH_DATA wait for FetchFill, ii=%s", ii->toString().c_str());
break; // must wait until FetchFill
// if (ii->isLink2Valid() && !ii->isAcked()) break; // must wait StoreL2Commit removed
}
}
}
MSYNC_DEBUG(2, "MAB Remove (front)\nii=%s", ii->toString().c_str());
#ifdef TSO
/************************************************************************
* Interface to TsoChecker
************************************************************************/
// If a (load) entry is marked as popped, it was never executed; don't
// let it get added to the TSO checking structure.
if (tsoChecker && !(ii->isPopped())) {
switch (ii->getEntryType()) {
case MEM_STORE_COMMIT:
if (ii->getDmaStore() == false) {
// if this is a DMA_STORE entry, don't let it go to TsoChecker
tsoChecker->input(ii->getThrdId(), ii->getIseq(), ii->getItype(), ii->getEntryType(),
ii->getAddr(), ii->getData(),
(ii->getItype() == ITYPE_ATOMIC && !(ii->isSwitchData())) ? (uint8_t) 0 : ii->getSizeV(),
ii->getDsrc(),
ii->getId(),
ii->getGlobal(), ii->getId());
}
break;
case MEM_LOAD_DATA:
tsoChecker->input(ii->getThrdId(), ii->getIseq(), ii->getItype(), ii->getEntryType(),
ii->getAddr(), ii->getData(), ii->getSizeV(), ii->getDsrc(),
ii->getDsrcMid(),
ii->getGlobal(), ii->getId());
break;
default: break;
}
}
#endif
ii++;
buf_.pop_front(); // remove the front one
// Note that when an entry is removed, all links to this entry become
// undefined.
}
MSYNC_DEBUG(4, "mab=%s", toString().c_str());
// // see if we can write back completed store_commits earlier
// ii = buf_.begin();
// while (ii != buf_.end()) {
// if ((ii->getEntryType() == MEM_STORE_COMMIT) &&
// (ii->isExecuted() && ii->isInvDone()) &&
// (!ii->isWriteBack()))
// {
// if (updateMemory(ii) == true) {
// MSYNC_DEBUG(1, "popFront: early WriteMemory id=%d T%d pa=%#llx data=%#llx", (int)ii->getId(), (int)ii->getThrdId(), ii->getAddr(), ii->getData());
// wdbuf->push_back(*ii); // write data to memory
// }
// }
// ii++;
// }
}
//=============================================================================
//=============================================================================
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findL1InstrEntry(LoadStoreCmd& cmd)
{
list<MemoryAccessEntry>::iterator ii, match;
uint64_t addr = cmd.getAddr();
uint32_t cid = cmd.getCoreId();
if (buf_.size() == 0) {
return buf_.end();
}
ii = buf_.end();
do {
ii--;
if (!ii->isPopped() &&
cid == ii->getCoreId() &&
(addr & ADDR_MASK) == (ii->getAddr() & ADDR_MASK) &&
(ii->getEntryType() == MEM_FETCH_FILL)) {
return (ii);
}
} while (ii != buf_.begin());
return (buf_.end());
}
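// Note: findL1InstrEntry() above and the findL1Data/findL2Data searches below
// walk the buffer backward (newest to oldest), so the most recently inserted
// matching entry wins.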
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findL1DataEntry(LoadStoreCmd& cmd)
{
return findL1DataEntry(buf_.end(), cmd.getCoreId(), cmd.getThrdId(), cmd.getAddr());
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findL1DataEntry(list<MemoryAccessEntry>::iterator from, uint32_t cid, uint32_t tid, uint64_t addr)
{
list<MemoryAccessEntry>::iterator ii, match;
if (buf_.size() == 0) {
return buf_.end();
}
ii = from;
do {
ii--;
if (!ii->isPopped() &&
cid == ii->getCoreId() &&
(addr & ADDR_MASK) == (ii->getAddr() & ADDR_MASK) &&
(ii->getEntryType() == MEM_STORE_UPDATE ||
ii->getEntryType() == MEM_LOAD_FILL)) {
return (ii);
}
} while (ii != buf_.begin());
return (buf_.end());
}
/* The entry pointed to by 'from' itself does not participate in the search. */
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findL2DataEntry(list<MemoryAccessEntry>::iterator from, LoadStoreCmd& cmd)
{
return findL2DataEntry(from, cmd.getCoreId(), cmd.getThrdId(), cmd.getAddr());
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findL2DataEntry(list<MemoryAccessEntry>::iterator from, uint32_t cid, uint32_t tid, uint64_t addr, bool executed)
{
list<MemoryAccessEntry>::iterator ii;
if (buf_.size() == 0 || from == buf_.begin()) {
return buf_.end();
}
int l2reg = 0;
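// Since addr is ANDed with 0xf000000000, only PA[39:36] survives, so the
// range test below effectively matches PA[39:36] == 0xA or 0xB, i.e. the
// L2 register region.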
if (((addr & 0xf000000000) >= 0xa000000000) &&
((addr & 0xf000000000) <= 0xbf00000000))
{
// l2 registers
l2reg = 1;
}
ii = from;
do
{
ii--;
if (!ii->isPopped() &&
((addr & ADDR_MASK) == (ii->getAddr() & ADDR_MASK)) &&
(ii->getEntryType() == MEM_STORE_COMMIT))
{
if ((executed == true) && (ii->isExecuted() == false))
{
// we are looking for a store_commit entry that is already executed
continue;
}
else
{
if ((l2reg == 1) &&
((ii->getCoreId() / RieslingInterface::cores_per_cpu) != (cid / RieslingInterface::cores_per_cpu)))
{
// Each node has its own L2 register set; those registers are not
// accessible by other nodes, so the node-id must be the same to be
// considered a match.
continue;
}
else
{
return (ii);
}
}
}
} while (ii != buf_.begin());
return (buf_.end());
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findStoreInvStoreUpdateSrc(LoadStoreCmd& cmd)
{
list<MemoryAccessEntry>::iterator ii;
uint32_t cbit = 1 << cmd.getCoreId();
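// cbit is this core's bit in the store's inv/cinv vectors. The match below
// requires that the core is targeted by the invalidation (inv bit set) but
// has not had its invalidation matched yet (cinv bit clear) -- a reading
// based on how inv/cinv are used here and in findEvictInvSrc().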
if (buf_.size() == 0) {
return buf_.end();
}
for (ii = buf_.begin(); ii != buf_.end(); ii++) {
if (!ii->isPopped() &&
ii->getEntryType() == MEM_STORE_COMMIT &&
cmd.getSrcTid() == ii->getThrdId() &&
(cmd.getAddr() & ADDR_MASK) == (ii->getAddr() & ADDR_MASK) &&
(cbit & ii->getInv()) != 0 && (cbit & ii->getCinv()) == 0) {
return (ii);
}
}
return (buf_.end());
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findStoreAckSrc(LoadStoreCmd& cmd)
{
list<MemoryAccessEntry>::iterator ii;
if (buf_.size() == 0) {
return buf_.end();
}
MSYNC_DEBUG(4, "%s", toString().c_str());
for (ii = buf_.begin(); ii != buf_.end(); ii++) {
if (!ii->isPopped() &&
ii->getEntryType() == MEM_STORE_COMMIT &&
cmd.getSrcTid() == ii->getThrdId() &&
(cmd.getAddr() & ADDR_MASK) == (ii->getAddr() & ADDR_MASK) &&
!(ii->isAcked())) {
return (ii);
}
}
return (buf_.end());
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findLoadFillSrc(LoadStoreCmd& cmd)
{
return findLoadFillSrc(buf_.end(), cmd.getCoreId(), cmd.getThrdId(), cmd.getAddr(), false);
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findLoadFillSrc(list<MemoryAccessEntry>::iterator endMark, uint32_t cid, uint32_t tid, uint64_t addr, bool noFault)
{
list<MemoryAccessEntry>::iterator ii;
if (buf_.size() == 0) {
return buf_.end();
}
for (ii = buf_.begin(); ii != endMark; ii++) {
#ifdef N2MODEL
if (!ii->isPopped() &&
ii->getEntryType() == MEM_LOAD_DATA &&
ii->getDsrc() == DSRC_L2_MEMORY &&
tid == ii->getThrdId() &&
!ii->isExecuted()) {
if (!noFault) {
if ((addr & ADDR_MASK) != (ii->getAddr() & ADDR_MASK))
{
MS_ERROR("LoadFill mismatches with address of 1st non-executed LoadData of that thread. tid=%d PA=%llx", tid, addr);
return buf_.end();
}
if (ii->isLinkValid())
{
MS_ERROR("LoadFill finds 1st non-executed LoadData having LoadFill already. tid=%d PA=%llx", tid, addr);
return buf_.end();
}
return (ii);
}
else {
if (((addr & ADDR_MASK) == (ii->getAddr() & ADDR_MASK)) &&
(!ii->isLinkValid())) {
return (ii);
}
else {
return buf_.end();
}
}
}
#else
if (!ii->isPopped() &&
ii->getEntryType() == MEM_LOAD_DATA &&
ii->getDsrc() == DSRC_L2_MEMORY &&
ii->isCacheL1() &&
cid == ii->getCoreId() &&
(addr & ADDR_MASK) == (ii->getAddr() & ADDR_MASK) &&
// !ii->isExecuted() &&
!ii->isLinkValid()) { // the last condition assumes the LoadFill arrives
return (ii);          // in the same order as the LoadData within the same core
}
#endif
}
return (buf_.end());
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findFetchFillSrc(LoadStoreCmd& cmd)
{
list<MemoryAccessEntry>::iterator ii;
uint32_t cid = cmd.getCoreId();
uint32_t tid = cmd.getThrdId();
if (buf_.size() == 0) {
return buf_.end();
}
MSYNC_DEBUG(4, "Find cid=%d tid=%d addr=%llx",
cmd.getCoreId(), cmd.getThrdId(), cmd.getAddr());
MSYNC_DEBUG(4, "%s", toString().c_str());
for (ii = buf_.begin(); ii != buf_.end(); ii++) {
if (!ii->isPopped() &&
ii->getEntryType() == MEM_FETCH_DATA &&
ii->getDsrc() == DSRC_L2_MEMORY &&
ii->isCacheL1() &&
cid == ii->getCoreId() &&
(cmd.getAddr() & ADDR_MASK) == (ii->getAddr() & ADDR_MASK) &&
!ii->isLinkValid()) { // the last condition assumes the FetchFill arrives
return (ii);          // in the same order as the FetchData within the same core
}
}
return (buf_.end());
}
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findEvictInvSrc(LoadStoreCmd& cmd)
{
list<MemoryAccessEntry>::iterator ii;
uint32_t cbit = 1 << cmd.getCoreId();
if (buf_.size() == 0) {
return buf_.end();
}
list<MemoryAccessEntry>::iterator jj = buf_.end();
for (ii = buf_.begin(); ii != buf_.end(); ii++) {
if (!ii->isPopped() &&
ii->getEntryType() == MEM_EVICT &&
cmd.getSrcBank() == ii->getSrcBank() &&
cmd.getSet() == ii->getSet() &&
// the address offset within a dcache line size must match
(cmd.getAddr() & (DCACHE_LINE_SIZE - 1)) == (ii->getAddr() & (DCACHE_LINE_SIZE - 1)) &&
// this assumes a one-to-one mapping between EVICT and EVICT_INV;
// if there is more than one EVICT_INV for one EVICT, this can get into
// trouble. 4/28/05
(cbit & ii->getInv()) != 0) {
if ((cbit & ii->getCinv()) == 0) {
// the first time we match up the entry, handle it.
return (ii);
}
else {
// We can have multiple EVICT_INVs pointing to one EVICT; the first
// match takes care of business. When the others come, there is no
// real match for them, so just return the first match; otherwise we
// would hit the no-match assert.
jj = ii;
//cerr << "WARNING: MemoryAccessBuffer::findEvictInvSrc: extra EVICT_INV, cmd=" << cmd.toString() << endl;//DBX
}
}
}
//return (buf_.end());
return jj;
}
//=============================================================================
//=============================================================================
void
MemoryAccessBuffer::empty(int tid)
{
MSYNC_DEBUG(3, "Before empty T%d load entries:\n%s", tid, toString().c_str());
if (tid == -1)
{
// We are going to flush out the entire buffer, so write back completed
// store_commits first. If we are only emptying a particular strand's
// entries, we should not have any completed store_commit entries for
// that strand, so we don't look for them here.
list<MemoryAccessEntry>::iterator ii = buf_.begin();
while (ii != buf_.end())
{
if ((ii->getEntryType() == MEM_STORE_COMMIT) &&
(ii->isExecuted() && ii->isInvDone()) &&
(!ii->isWriteBack()))
{
MSYNC_DEBUG(2, "before flush WriteMemory id=%d T%d pa=%#llx data=%#llx", (int)ii->getId(), (int)ii->getThrdId(), ii->getAddr(), ii->getData());
// write data to memory
rif_->writeMemory(ii->getCoreId(), ii->getThrdId(), (ii->getAddr() & ADDR_MASK), ii->getData(), 8);
}
ii++;
}
}
list<MemoryAccessEntry>::iterator ii;
ii = buf_.end();
int size = buf_.size();
while (size > 0) {
ii--;
size--;
//TODO check for entry type if we only empty a particular buffer,
// e.g., load buffer. Otherwise remove all entries of the
// specified strand-id.
// if (ii->getThrdId() == tid) {
if ((tid == -1) ||
((ii->getThrdId() == tid) &&
(ii->getEntryType() == MEM_LOAD_DATA || ii->getEntryType() == MEM_LOAD_FILL) &&
(!ii->isExecuted()))) {
//TODO make sure we free the space properly
MSYNC_DEBUG(2, "%s Empty T%d load entry=%s", "MAB", tid, ii->toString().c_str());
ii->setPopped();
if (ii->getItype() == ITYPE_ATOMIC) {
// an atomic instr comes with both load and store entries,
// pop both of them.
popBackStore(ii);
}
buf_.erase(ii);
}
}
MSYNC_DEBUG(3, "After empty T%d:\n%s", tid, toString().c_str());
}
//=============================================================================
//=============================================================================
void
MemoryAccessBuffer::popBack(int tid, int count)
{
MSYNC_DEBUG(3, "Before popBack:\n%s", toString().c_str());
assert(buf_.size() >= count);
list<MemoryAccessEntry>::iterator ii;
ii = buf_.end();
int size = buf_.size();
while (size > 0 && count > 0) {
ii--;
size--;
if ((ii->getThrdId() == tid) &&
(ii->getEntryType() == MEM_LOAD_DATA)) {
MSYNC_DEBUG(2, "%s Remove (back): load=%s", "MAB", ii->toString().c_str());
//TODO make sure we free the space properly
// there might be a LOAD_FILL for this entry still to come, so don't
// remove it; just mark it as popped so we can retire it later
// (as if it were an executed entry)
//buf_.erase(ii);
ii->setPopped();
count--;
if (ii->getItype() == ITYPE_ATOMIC) {
// an atomic instr comes with both load and store entries,
// pop both of them.
popBackStore(ii);
}
}
}
MSYNC_DEBUG(3, "After popBack:\n%s", toString().c_str());
}
//=============================================================================
//=============================================================================
void
MemoryAccessBuffer::popBackStore(list<MemoryAccessEntry>::iterator& ii)
{
list<MemoryAccessEntry>::iterator jj = buf_.end();
//int size2 = buf_.size();
bool found = false;
//while (!found && size2 > 0) {
while (!found && (jj != ii)) {
// we are looking for a store_commit (jj) of an atomic instr, the SC
// always comes after the corresponding load_data (ii), so if jj
// reaches ii, then we know there is no SC associated with the ld_data
jj--;
//size2--;
if ((jj->getThrdId() == ii->getThrdId()) &&
(jj->getEntryType() == MEM_STORE_COMMIT) &&
(jj->getItype() == ITYPE_ATOMIC) &&
(!jj->isPopped())) {
MSYNC_DEBUG(2, "%s Remove (back): store=%s", "MAB", jj->toString().c_str());
jj->setPopped();
found = true;
}
}
if (found) {
refreshLoadEntry(jj);
}
else {
MSYNC_DEBUG(1, "WARNING: popBackStore(): no matched store entry for load=%s", ii->toString().c_str());
}
}
//=============================================================================
// Once a store entry is removed from the buffer (i.e., marked as popped due
// to error injection), all the load entries that got their data from the
// removed store entry must look for a new source and refresh their data.
// Actually, store-commit entries have to be refreshed as well.
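// For example, if a STORE_COMMIT is popped by error injection, every later
// LOAD_DATA/LOAD_FILL re-reads its data from another matching MAB entry or
// from aligned memory (getL1Data()/getL2Data() below), and a merged
// STORE_COMMIT to the same address re-merges its data.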
//=============================================================================
void
MemoryAccessBuffer::refreshLoadEntry(list<MemoryAccessEntry>::iterator fromMark)
{
//cerr << "DBX: MemoryAccessBuffer::refreshLoadEntry\n";//DBX
list<MemoryAccessEntry>::iterator ii = fromMark;
while (ii != buf_.end()) {
ii++;
if (ii->getEntryType() == MEM_LOAD_DATA) {
// load_data
uint32_t cid = ii->getCoreId();
uint32_t tid = ii->getThrdId();
uint64_t addr = ii->getAddr();
switch (ii->getDsrc()) {
case DSRC_L1:
ii->setData(getL1Data(ii, cid, tid, addr));
break;
case DSRC_L2_MEMORY:
ii->setData(getL2Data(ii, cid, tid, addr));
break;
}
}
else if (ii->getEntryType() == MEM_LOAD_FILL) {
// load_fill
uint32_t cid = ii->getCoreId();
uint32_t tid = ii->getThrdId();
uint64_t addr = ii->getAddr();
list<MemoryAccessEntry>::iterator mlink = findLoadFillSrc(ii, cid, tid, addr, true);
if (mlink == buf_.end()) {
// read aligned 8 bytes
ii->setData((rif_->readMemory(cid, tid, (addr & ADDR_MASK), 8)));
}
else {
ii->setData(getL2Data(mlink, cid, tid, addr));
}
}
else if (ii->getEntryType() == MEM_STORE_COMMIT) {
// store_commit
if ((ii->getMerged() == true) && ((ii->getAddr() & ADDR_MASK) == (fromMark->getAddr() & ADDR_MASK))) {
// if we had to merge data for this SC entry, we need to re-do
// it, because the previous SC entry may have been thrown out
// by error injection.
uint32_t cid = ii->getCoreId();
uint32_t tid = ii->getThrdId();
uint64_t addr = ii->getAddr();
uint64_t mdata = getL2Data(ii, cid, tid, addr);
mdata = merge(mdata, ii->getOrigData(), ii->getOrigSizeV());
ii->setData(mdata);
}
}
}
}
//=============================================================================
//=============================================================================
uint64_t
MemoryAccessBuffer::getL1Data(list<MemoryAccessEntry>::iterator from, uint32_t cid, uint32_t tid, uint64_t addr)
{
list<MemoryAccessEntry>::iterator mae = findL1DataEntry(from, cid, tid, addr);
if (mae == buf_.end()) {
// read aligned 8 bytes
return (rif_->readMemory(cid, tid, (addr & ADDR_MASK), 8));
}
else {
return (mae->getData());
}
}
//=============================================================================
//=============================================================================
uint64_t
MemoryAccessBuffer::getL2Data(list<MemoryAccessEntry>::iterator from, uint32_t cid, uint32_t tid, uint64_t addr, bool executed)
{
list<MemoryAccessEntry>::iterator mae = findL2DataEntry(from, cid, tid, addr, executed);
if (mae == buf_.end()) {
// read aligned 8 bytes
return (rif_->readMemory(cid, tid, (addr & ADDR_MASK), 8));
}
else {
return (mae->getData());
}
}
//=============================================================================
// copied from MemorySync::merge()
//=============================================================================
uint64_t
MemoryAccessBuffer::merge (uint64_t todata, uint64_t fromdata, uint8_t mgvec)
{
uint64_t data;
uint64_t byteMask1 = byteMask(~mgvec);
uint64_t byteMask2 = byteMask(mgvec);
data = (todata & byteMask1) | (fromdata & byteMask2);
//MSYNC_DEBUG(4, "MemoryAccessBuffer::merge: todata=0x%llx fdata=0x%llx merge=0x%x result=0x%llx", todata, fromdata, (int) mgvec, data);
return (data);
}
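// Worked example (illustration only, not from the original source): with
// mgvec = 0xF0 the upper four bytes come from 'fromdata' and the lower four
// from 'todata', e.g.
//   merge(0x1111111111111111ULL, 0x2222222222222222ULL, 0xF0)
//     == 0x2222222211111111ULL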
//=============================================================================
// copied from MemorySync::byteMask()
//=============================================================================
uint64_t
MemoryAccessBuffer::byteMask(const uint8_t vbyte)
{
uint64_t mask = 0ull;
uint8_t bitSelector = 0x80; // 10000000
for (int i = 0; i < 8; i++) {
mask = mask << 8;
mask = ((vbyte & bitSelector) == 0) ? mask : mask | 0xffull;
bitSelector >>= 1;
}
return (mask);
}
//=============================================================================
// search for a matching dma_store_start entry with exe=0
//=============================================================================
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findDmaStoreStart(LoadStoreCmd& cmd, int type)
{
list<MemoryAccessEntry>::iterator ii = buf_.begin();
while (ii != buf_.end())
{
if ((ii->getEntryType() == MEM_DMA_STORE_START) &&
(!ii->isExecuted()))
{
if (type == DMA_EVICT)
{
// EVICT
if ((cmd.getAddr() & ADDR_MASK) == (ii->getAddr() & ADDR_MASK))
{
return ii;
}
}
else if (type == DMA_EVICT_INV)
{
// EVICT_INV
uint32_t cbit = 1 << cmd.getCoreId();
if ((cmd.getSrcBank() == ii->getSrcBank()) &&
// set is always 0 at the moment
(cmd.getSet() == ii->getSet()) &&
// the address offset within a dcache line size must match
((cmd.getAddr() & (DCACHE_LINE_SIZE - 1)) == (ii->getAddr() & (DCACHE_LINE_SIZE - 1))) &&
(((ii->getInv() & cbit) != 0) && ((ii->getCinv() & cbit) == 0)))
{
return ii;
}
}
else if (type == DMA_STORE)
{
// corresponding DMA_STORE
if ((cmd.getAddr() & ADDR_MASK) == (ii->getAddr() & ADDR_MASK))
{
return ii;
}
}
else
{
MS_ERROR("findDmaStoreStart given wrong type %d", type);
return buf_.end();
}
}
ii++;
}
// we get here only if no matching entry was found
return buf_.end();
}
//=============================================================================
// search for a matching dma_store entry with inv_vec==0 (EVICT), or one
// with the corresponding bit in inv_vec==1 (EVICT_INV)
//=============================================================================
list<MemoryAccessEntry>::iterator
MemoryAccessBuffer::findDmaStoreEntry(LoadStoreCmd& cmd, int type)
{
list<MemoryAccessEntry>::iterator ii = buf_.begin();
while (ii != buf_.end()) {
if ((ii->getEntryType() == MEM_STORE_COMMIT) &&
(ii->getDmaStore() == true)) {
if (type == DMA_EVICT) {
// EVICT
if (((cmd.getAddr() & ADDR_MASK) == (ii->getAddr() & ADDR_MASK)) &&
(ii->getInvSet() == false)) {
// inv_vec is not set yet, this is the one.
return ii;
}
}
else if (type == DMA_EVICT_INV) {
// EVICT_INV
uint32_t cbit = 1 << cmd.getCoreId();
if ((cmd.getSrcBank() == ii->getSrcBank()) &&
(cmd.getSet() == ii->getSet()) &&
// the address offset within a dcache line size must match
((cmd.getAddr() & (DCACHE_LINE_SIZE - 1)) == (ii->getAddr() & (DCACHE_LINE_SIZE - 1))) &&
(((ii->getInv() & cbit) != 0) && ((ii->getCinv() & cbit) == 0))) {
// if the corresponding bit in inv_vec is 1 and is not
// set yet, this is the one.
return ii;
}
}
else
{
MS_ERROR("findDmaStoreEntry given wrong type %d", type);
return buf_.end();
}
}
ii++;
}
// did not find any matched entry
return buf_.end();
}
//=============================================================================
// When a store_commit is completed (including the related inv & upd), write
// its data back to memory if there is no address conflict with load entries
// in front of the store_commit in the MAB. Even if the data is written back
// here, it will be written back to memory again when the store_commit reaches
// the head of the MAB, though that is a duplicate act.
// ===> not working yet, 3/31/2006
//=============================================================================
bool
MemoryAccessBuffer::updateMemory(list<MemoryAccessEntry>::iterator stCommit)
{
if (buf_.size() <= 1) {
// there is at most one entry, nothing to check about, the data
// will be written back by normal MAB checking, no need to do it here.
return false;
}
else {
uint64_t addr = stCommit->getAddr();
list<MemoryAccessEntry>::iterator ii = stCommit;
bool writeBack = true;
do {
// 4/20/06. We said that Riesling could post a STCOM from the MAB
// to Global Memory early. We need to change the condition when
// this can be done.
//
// OLD
// If there is nothing in the MAB before the STCOM to the same
// L2 cache line PA[39:4].
//
// NEW
// If there is nothing in the MAB before the STCOM to the same
// L1 index PA[10:4]
//
// More details:
// It is correct that the STCOM had ivect=0 even though it
// doesn't seem to make sense. It has ivect=0 because there was
// an earlier Load to a different PA, but to the same L1 index
// that replaced the entry in the L1$. That is why the Store did
// not need to invalidate the L1$.
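// For example (reading of the NEW rule above, assuming L1_CACHE_INDEX_MASK
// covers PA[10:4]): a pending LOAD_DATA whose PA differs from the STCOM's PA
// only above bit 10 still shares the L1 index, so it blocks the early
// write-back below.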
ii--;
if (ii->getEntryType() == MEM_LOAD_DATA) {
if ((!ii->isExecuted() && !ii->isPopped()) &&
((ii->getAddr() & L1_CACHE_INDEX_MASK) == (addr & L1_CACHE_INDEX_MASK))) {
// entry of the same cache line
// To keep it simpler and more conservative: if there is such an entry
// of the same cache line in front of this store_commit,
// don't write the store_commit data back to memory.
writeBack = false;
}
}
else if ((ii->getAddr() & L2_CACHE_LINE_MASK) == (addr & L2_CACHE_LINE_MASK)) {
writeBack = false;
}
// if ((ii->getEntryType() == MEM_LOAD_DATA) &&
// (!ii->isExecuted() && !ii->isPopped())) {
// // there is a load from the same cache line which is
// // not completed yet, cannot write the store_commit data
// // back to memory
// writeBack = false;
// }
// else if ((ii->getEntryType() == MEM_STORE_COMMIT) &&
// ((!ii->isExecuted() && !ii->isPopped()) ||
// (ii->isExecuted() && !ii->isInvDone()))) {
// // there is a store to the same cache line which is
// // not completed yet, cannot write the store_commit data
// // back to memory
// writeBack = false;
// }
// else if ((ii->getEntryType() == MEM_STORE_UPDATE) ||
// (ii->getEntryType() == MEM_LOAD_FILL)) {
// // have to wait til no store_update/load_fill of the same
// // cache line is in front of this store_commit
// writeBack = false;
// }
// else if ((ii->getEntryType() == MEM_EVICT) &&
// (!ii->isInvDone())) {
// // if there is a incompleted MEM_EVICT in front, wait
// writeBack = false;
// }
// }
} while ((writeBack == true) && (ii != buf_.begin()));
if (writeBack == true) {
stCommit->setWriteBack();
}
return writeBack;
}
}