Add log_pruner function
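Re-enable the previously commented-out machinery the log pruner depends on: CbLog::isEmpty() and the circular-buffer deleteEntry()/deleteSlot() overloads; LogMgr::createDummyLogEntry(), pruneLogEntries(), logFree(), and the C entry point nvm_log_free(); the remaining LogType predicates (isStrop, isStartSection, isEndSection, and friends); and the allocator free path, PArena::freeMem(), PRegion::freeMem(), and PRegionMgr::freeMem()/freeMemImpl(), together with the ptr-based PMallocUtil helpers and decrementActualAllocedStats().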

parent d5574c4b
@@ -48,10 +48,10 @@ struct CbLog
             Start.load(std::memory_order_acquire);
     }
-//    bool isEmpty() {
-//        return Start.load(std::memory_order_acquire) ==
-//            End.load(std::memory_order_acquire);
-//    }
+    bool isEmpty() {
+        return Start.load(std::memory_order_acquire) ==
+            End.load(std::memory_order_acquire);
+    }
 };
 
 template<class T>
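For context on the isEmpty() re-enabled above, a minimal, self-contained sketch of the Start/End protocol, assuming a simplified single-producer/single-consumer ring (MiniCb, kSlots, and push are illustrative names, not part of the tree):

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kSlots = 8;  // illustrative capacity

    template <class T>
    struct MiniCb {
        T Slots[kSlots];
        std::atomic<uint32_t> Start{0};  // index of the oldest live entry
        std::atomic<uint32_t> End{0};    // index one past the newest entry

        // Same test as the isEmpty() above: with one producer bumping End
        // and one consumer bumping Start, equal indices mean every
        // published entry has been consumed.
        bool isEmpty() const {
            return Start.load(std::memory_order_acquire) ==
                End.load(std::memory_order_acquire);
        }

        bool push(const T &v) {  // producer side
            uint32_t e = End.load(std::memory_order_relaxed);
            if ((e + 1) % kSlots == Start.load(std::memory_order_acquire))
                return false;    // full: one slot stays unused by design
            Slots[e] = v;
            End.store((e + 1) % kSlots, std::memory_order_release);
            return true;
        }
    };

The acquire loads pair with the producer's release store, so a consumer that observes Start != End also observes the slot contents written before End was advanced.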
@@ -28,7 +28,7 @@ extern "C" {
 //    void nvm_rwlock_unlock(void *lock_address);
     void nvm_store(void *addr, size_t size);
     void nvm_log_alloc(void *addr);
-//    void nvm_log_free(void *addr);
+    void nvm_log_free(void *addr);
 //    void nvm_memset(void *addr, size_t sz);
 //    void nvm_memcpy(void *dst, size_t sz);
 //    void nvm_memmove(void *dst, size_t sz);
@@ -128,7 +128,7 @@ public:
 //    void logStrcpy(void *dst, size_t sz);
 //    void logStrcat(void *dst, size_t sz);
     void logAlloc(void *addr);
-//    void logFree(void *addr);
+    void logFree(void *addr);
 //    LogStructure *createLogStructure(LogEntry *le);
 //
@@ -165,8 +165,8 @@ public:
 //    void deleteOwnerInfo(LogEntry *le);
 //    void deleteEntry(LogEntry *addr)
 //        { deleteEntry<LogEntry>(CbLogList_, addr); }
-//    void deleteEntry(LogEntry *addr, CbListNode<LogEntry>* &last_cb)
-//        { deleteEntry(CbLogList_, addr, last_cb); }
+    void deleteEntry(LogEntry *addr, CbListNode<LogEntry>* &last_cb)
+        { deleteEntry(CbLogList_, addr, last_cb); }
 //
 //    void acquireStatsLock()
 //        { assert(Stats_); Stats_->acquireLock(); }
@@ -339,9 +339,9 @@ private:
         void * addr, size_t size_in_bits);
 //    LogEntry *createMemStrLogEntry(
 //        void *addr, size_t sz, LogType le_type);
-//    LogEntry *createDummyLogEntry();
-//
-//    void pruneLogEntries();
+    LogEntry *createDummyLogEntry();
+
+    void pruneLogEntries();
     void publishLogEntry(
         LogEntry *le);
     void signalHelper();
@@ -395,8 +395,8 @@ private:
 //        void *addr, size_t sz);
 //
 //    // Circular buffer management
-//    void deleteEntry(
-//        const std::atomic<CbListNode<LogEntry>*>& cb_list, LogEntry *addr, CbListNode<LogEntry> * &last_cb_used);
+    void deleteEntry(
+        const std::atomic<CbListNode<LogEntry>*>& cb_list, LogEntry *addr, CbListNode<LogEntry> * &last_cb_used);
     template<class T> CbLog<T> *getNewCb(
         uint32_t size, uint32_t rid, CbLog<T> **log_p,
         std::atomic<CbListNode<T>*> *cb_list_p);
@@ -405,8 +405,8 @@
         std::atomic<CbListNode<T>*> *cb_list_p);
 //    template<class T> void deleteEntry(
 //        const std::atomic<CbListNode<T>*>& cb_list, T *addr);
-//    template<class T> void deleteSlot(
-//        CbLog<T> *cb, T *addr);
+    template<class T> void deleteSlot(
+        CbLog<T> *cb, T *addr);
 };
@@ -85,52 +85,52 @@ struct LogStructure
     // getting published.
     LogStructure *Next;
 };
-//
-//static inline bool isDummy(LogType le_type)
-//{
-//    return le_type == LE_dummy;
-//}
-//
-//static inline bool isAcquire(LogType le_type)
-//{
-//    return le_type == LE_acquire;
-//}
-//
-//static inline bool isRWLockRdLock(LogType le_type)
-//{
-//    return le_type == LE_rwlock_rdlock;
-//}
-//
-//static inline bool isRWLockWrLock(LogType le_type)
-//{
-//    return le_type == LE_rwlock_wrlock;
-//}
-//
-//static inline bool isBeginDurable(LogType le_type)
-//{
-//    return le_type == LE_begin_durable;
-//}
-//
-//static inline bool isRelease(LogType le_type)
-//{
-//    return le_type == LE_release;
-//}
-//
-//static inline bool isRWLockUnlock(LogType le_type)
-//{
-//    return le_type == LE_rwlock_unlock;
-//}
-//
-//static inline bool isEndDurable(LogType le_type)
-//{
-//    return le_type == LE_end_durable;
-//}
-//
-//static inline bool isStr(LogType le_type)
-//{
-//    return le_type == LE_str;
-//}
-//
+
+static inline bool isDummy(LogType le_type)
+{
+    return le_type == LE_dummy;
+}
+
+static inline bool isAcquire(LogType le_type)
+{
+    return le_type == LE_acquire;
+}
+
+static inline bool isRWLockRdLock(LogType le_type)
+{
+    return le_type == LE_rwlock_rdlock;
+}
+
+static inline bool isRWLockWrLock(LogType le_type)
+{
+    return le_type == LE_rwlock_wrlock;
+}
+
+static inline bool isBeginDurable(LogType le_type)
+{
+    return le_type == LE_begin_durable;
+}
+
+static inline bool isRelease(LogType le_type)
+{
+    return le_type == LE_release;
+}
+
+static inline bool isRWLockUnlock(LogType le_type)
+{
+    return le_type == LE_rwlock_unlock;
+}
+
+static inline bool isEndDurable(LogType le_type)
+{
+    return le_type == LE_end_durable;
+}
+
+static inline bool isStr(LogType le_type)
+{
+    return le_type == LE_str;
+}
+
 //static inline bool isMemset(LogType le_type)
 //{
 //    return le_type == LE_memset;
@@ -146,16 +146,16 @@ struct LogStructure
 //    return le_type == LE_memmove;
 //}
 //
-//static inline bool isMemop(LogType le_type)
-//{
-//    return le_type == LE_memset || le_type == LE_memcpy ||
-//        le_type == LE_memmove;
-//}
-//
-//static inline bool isAlloc(LogType le_type)
-//{
-//    return le_type == LE_alloc;
-//}
+static inline bool isMemop(LogType le_type)
+{
+    return le_type == LE_memset || le_type == LE_memcpy ||
+        le_type == LE_memmove;
+}
+
+static inline bool isAlloc(LogType le_type)
+{
+    return le_type == LE_alloc;
+}
 
 static inline bool isFree(LogType le_type)
 {
@@ -171,24 +171,24 @@ static inline bool isFree(LogType le_type)
 //{
 //    return le_type == LE_strcat;
 //}
-//
-//static inline bool isStrop(LogType le_type)
-//{
-//    return le_type == LE_strcpy || le_type == LE_strcat;
-//}
-//
-//static inline bool isStartSection(LogType le_type)
-//{
-//    return isAcquire(le_type) || isRWLockRdLock(le_type) ||
-//        isRWLockWrLock(le_type) || isBeginDurable(le_type);
-//}
-//
-//static inline bool isEndSection(LogType le_type)
-//{
-//    return isRelease(le_type) || isRWLockUnlock(le_type) ||
-//        isEndDurable(le_type);
-//}
-//
+
+static inline bool isStrop(LogType le_type)
+{
+    return le_type == LE_strcpy || le_type == LE_strcat;
+}
+
+static inline bool isStartSection(LogType le_type)
+{
+    return isAcquire(le_type) || isRWLockRdLock(le_type) ||
+        isRWLockWrLock(le_type) || isBeginDurable(le_type);
+}
+
+static inline bool isEndSection(LogType le_type)
+{
+    return isRelease(le_type) || isRWLockUnlock(le_type) ||
+        isEndDurable(le_type);
+}
 
 } // namespace Atlas
 
 #endif
@@ -55,12 +55,12 @@ public:
 //    void *get_start_addr() const { return StartAddr_; }
 //    void *get_end_addr() const { return EndAddr_; }
 //    uint64_t get_actual_alloced() const { return ActualAlloced_; }
-//
-//    bool doesRangeCheck(void *start, size_t sz) const
-//        { return start >= StartAddr_ &&
-//            (static_cast<char*>(start) + sz) <
-//            static_cast<char*>(EndAddr_); }
-//
+
+    bool doesRangeCheck(void *start, size_t sz) const
+        { return start >= StartAddr_ &&
+            (static_cast<char*>(start) + sz) <
+            static_cast<char*>(EndAddr_); }
+
     void *allocMem(
         size_t sz, bool does_need_cache_line_alignment,
         bool does_need_logging);
@@ -71,10 +71,10 @@ public:
         size_t sz, bool does_need_cache_line_alignment,
         bool does_need_logging);
     void *allocRawMem(size_t);
-//
-//    void freeMem(void *ptr, bool should_log);
-//
-//    void Lock() { pthread_mutex_lock(&Lock_); }
+
+    void freeMem(void *ptr, bool should_log);
+
+    void Lock() { pthread_mutex_lock(&Lock_); }
     int tryLock() { return pthread_mutex_trylock(&Lock_); }
     void Unlock() { pthread_mutex_unlock(&Lock_); }
@@ -107,7 +107,7 @@ private:
     void deleteFromFreeList(uint32_t bin_no, void *mem);
     void incrementActualAllocedStats(size_t sz);
-//    void decrementActualAllocedStats(size_t sz);
+    void decrementActualAllocedStats(size_t sz);
 };
 
 inline void PArena::initAllocAddresses(void *start_addr)
@@ -132,13 +132,13 @@ inline void PArena::incrementActualAllocedStats(size_t sz)
 #endif
 }
 
-//inline void PArena::decrementActualAllocedStats(size_t sz)
-//{
-//#if defined(ATLAS_ALLOC_STATS)
-//    ActualAlloced_ -= sz;
-//    NVM_FLUSH(&ActualAlloced_);
-//#endif
-//}
+inline void PArena::decrementActualAllocedStats(size_t sz)
+{
+#if defined(ATLAS_ALLOC_STATS)
+    ActualAlloced_ -= sz;
+    NVM_FLUSH(&ActualAlloced_);
+#endif
+}
 
 } // namespace Atlas
@@ -52,10 +52,10 @@ public:
         { return static_cast<void*>(
             static_cast<char*>(mem) + get_metadata_size()); }
-//    static void *ptr2mem(void *ptr)
-//        { return static_cast<void*>(
-//            static_cast<char*>(ptr) - get_metadata_size()); }
-//
+    static void *ptr2mem(void *ptr)
+        { return static_cast<void*>(
+            static_cast<char*>(ptr) - get_metadata_size()); }
+
     static size_t get_alignment()
         { return 2*sizeof(size_t); }
@@ -75,25 +75,25 @@ public:
     static size_t get_requested_alloc_size_from_mem(void *mem)
         { return *(static_cast<size_t*>(mem)); }
-//    static size_t get_requested_alloc_size_from_ptr(void *ptr)
-//        { void *mem = ptr2mem(ptr);
-//            return *(static_cast<size_t*>(mem)); }
+    static size_t get_requested_alloc_size_from_ptr(void *ptr)
+        { void *mem = ptr2mem(ptr);
+            return *(static_cast<size_t*>(mem)); }
 
     static size_t *get_is_allocated_ptr_from_mem(void *mem)
         { return reinterpret_cast<size_t*>(
             static_cast<char*>(mem) + sizeof(size_t)); }
-//    static size_t *get_is_allocated_ptr_from_ptr(void *ptr)
-//        { void *mem = ptr2mem(ptr);
-//            return reinterpret_cast<size_t*>(
-//                static_cast<char*>(mem) + sizeof(size_t)); }
-//
+    static size_t *get_is_allocated_ptr_from_ptr(void *ptr)
+        { void *mem = ptr2mem(ptr);
+            return reinterpret_cast<size_t*>(
+                static_cast<char*>(mem) + sizeof(size_t)); }
+
     static bool is_mem_allocated(void *mem)
         { return *get_is_allocated_ptr_from_mem(mem) == true; }
-//
-//    static bool is_ptr_allocated(void *ptr)
-//        { return *get_is_allocated_ptr_from_ptr(ptr) == true; }
-//
+
+    static bool is_ptr_allocated(void *ptr)
+        { return *get_is_allocated_ptr_from_ptr(ptr) == true; }
+
     static uint32_t get_next_bin_number(uint32_t bin_number)
     {
         assert(bin_number && "Non-zero bin number!");
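The helpers above all assume the two-word header that mem2ptr() lays out in front of every block. A standalone sketch of that layout and of the round trip the new *_from_ptr variants rely on, assuming the metadata is exactly the two header words shown (kMetadataSize and main are illustrative, not tree code):

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    // mem -> [ size_t requested_size | size_t is_allocated | payload ... ]
    //                                                         ^-- ptr
    constexpr size_t kMetadataSize = 2 * sizeof(size_t);  // assumed get_metadata_size()

    void *mem2ptr(void *mem) { return static_cast<char*>(mem) + kMetadataSize; }
    void *ptr2mem(void *ptr) { return static_cast<char*>(ptr) - kMetadataSize; }

    int main() {
        void *mem = std::malloc(64 + kMetadataSize);
        *static_cast<size_t*>(mem) = 64;                    // requested size
        *reinterpret_cast<size_t*>(
            static_cast<char*>(mem) + sizeof(size_t)) = 1;  // allocated flag
        void *ptr = mem2ptr(mem);       // what the caller of allocMem() sees
        assert(ptr2mem(ptr) == mem);    // the round trip used by the helpers
        std::free(mem);
        return 0;
    }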
@@ -69,7 +69,7 @@ public:
         bool does_need_logging);
 //    void *callocMem(size_t nmemb, size_t sz);
 //    void *reallocMem(void*, size_t);
-//    void freeMem(void *ptr, bool should_log);
+    void freeMem(void *ptr, bool should_log);
 
     void setRoot(void *new_root)
     {
@@ -130,14 +130,14 @@ private:
         bool does_need_cache_line_alignment, bool does_need_logging);
     void flushDirtyCacheLines();
 };
-//
-//inline void PRegion::freeMem(void *ptr, bool should_log)
-//{
-//    uint32_t arena_index = (reinterpret_cast<intptr_t>(ptr) -
-//        reinterpret_cast<intptr_t>(BaseAddr_))/kArenaSize_;
-//    getArena(arena_index)->freeMem(ptr, should_log);
-//}
-//
+
+inline void PRegion::freeMem(void *ptr, bool should_log)
+{
+    uint32_t arena_index = (reinterpret_cast<intptr_t>(ptr) -
+        reinterpret_cast<intptr_t>(BaseAddr_))/kArenaSize_;
+    getArena(arena_index)->freeMem(ptr, should_log);
+}
+
 inline void PRegion::initArenaAllocAddresses()
 {
     for (uint32_t i = 0; i < kNumArenas_; ++i)
@@ -76,9 +76,9 @@ public:
         bool does_need_cache_line_alignment, bool does_need_logging) const;
 //    void *callocMem(size_t nmemb, size_t sz, region_id_t) const;
 //    void *reallocMem(void*, size_t, region_id_t) const;
-//    void freeMem(void *ptr, bool should_log = true) const;
+    void freeMem(void *ptr, bool should_log = true) const;
 //    void deleteMem(void *ptr, bool should_log = true) const;
-//    void freeMemImpl(region_id_t rgn_id, void *ptr, bool should_log) const;
+    void freeMemImpl(region_id_t rgn_id, void *ptr, bool should_log) const;
 //
     void *allocMemWithoutLogging(size_t sz, region_id_t rid) const;
     void *allocMemCacheLineAligned(
@@ -194,28 +194,28 @@ LogEntry *LogMgr::createStrLogEntry(void *addr, size_t size_in_bits)
 //#endif
 //    return le;
 //}
-//
-/////
-///// @brief Create a dummy log entry
-///// @retval Pointer to created log entry
-/////
-//LogEntry *LogMgr::createDummyLogEntry()
-//{
-//#ifdef _FORCE_FAIL
-//    fail_program();
-//#endif
-//    LogEntry *le = allocLogEntry();
-//    assert(le);
-//
-//#if defined(_USE_MOVNT)
-//    logNonTemporal(le, 0, 0, LE_dummy);
-//#else
-//    memset(le, 0, sizeof(LogEntry));
-//    le->Type = LE_dummy;
-//#endif
-//    return le;
-//}
-//
+
+///
+/// @brief Create a dummy log entry
+/// @retval Pointer to created log entry
+///
+LogEntry *LogMgr::createDummyLogEntry()
+{
+#ifdef _FORCE_FAIL
+    fail_program();
+#endif
+    LogEntry *le = allocLogEntry();
+    assert(le);
+
+#if defined(_USE_MOVNT)
+    logNonTemporal(le, 0, 0, LE_dummy);
+#else
+    memset(le, 0, sizeof(LogEntry));
+    le->Type = LE_dummy;
+#endif
+    return le;
+}
+
 /////
 ///// @brief Create a thread specific log header
 ///// @param le First log entry for this thread
@@ -23,39 +23,39 @@
 namespace Atlas {
 
-//void LogMgr::pruneLogEntries()
-//{
-//    if (!TL_LogStructure_) return;
-//
-//    LogEntry *le = TL_LogStructure_->Le;
-//    LogEntry *leNext;
-//    TL_LogStructure_->Le = nullptr;
-////    TL_FirstLogEntry_->Next.store(nullptr, std::memory_order_release);
-//    static CbListNode<LogEntry> *last_cb_used = 0;
-//    while (le) {
-//        leNext = le->Next.load(std::memory_order_acquire);
-//        #if defined(_LOG_WITH_MALLOC)
-//        if (le->isMemop() || le->isStrop())
-//            free((void*)le->ValueOrPtr);
-//        free(le);
-//        #elif defined(_LOG_WITH_NVM_ALLOC)
-//        if (le->isMemop() || le->isStrop())
-//            PRegionMgr::getInstance().freeMem((void*)le->ValueOrPtr, true);
-//        PRegionMgr::getInstance().freeMem(le, true);
-//        #else
-//        if (le->isMemop() || le->isStrop())
-//            PRegionMgr::getInstance().freeMem((void*)le->ValueOrPtr, true);
-//        LogMgr::getInstance().deleteEntry(le, last_cb_used);
-//        #endif
-//        le = leNext;
-//    }
-//    LogEntry *dummy_le = createDummyLogEntry();
-//    flushLogUncond(dummy_le);
-//    TL_LogStructure_->Le = dummy_le;
-//    TL_LastLogEntry_ = dummy_le;
-////    TL_LastLogEntry_ = TL_FirstLogEntry_;
-//    return;
-//}
+void LogMgr::pruneLogEntries()
+{
+    if (!TL_LogStructure_) return;
+
+    LogEntry *le = TL_LogStructure_->Le;
+    LogEntry *leNext;
+    TL_LogStructure_->Le = nullptr;
+//    TL_FirstLogEntry_->Next.store(nullptr, std::memory_order_release);
+    static CbListNode<LogEntry> *last_cb_used = 0;
+    while (le) {
+        leNext = le->Next.load(std::memory_order_acquire);
+        #if defined(_LOG_WITH_MALLOC)
+        if (le->isMemop() || le->isStrop())
+            free((void*)le->ValueOrPtr);
+        free(le);
+        #elif defined(_LOG_WITH_NVM_ALLOC)
+        if (le->isMemop() || le->isStrop())
+            PRegionMgr::getInstance().freeMem((void*)le->ValueOrPtr, true);
+        PRegionMgr::getInstance().freeMem(le, true);
+        #else
+        if (le->isMemop() || le->isStrop())
+            PRegionMgr::getInstance().freeMem((void*)le->ValueOrPtr, true);
+        LogMgr::getInstance().deleteEntry(le, last_cb_used);
+        #endif
+        le = leNext;
+    }
+    LogEntry *dummy_le = createDummyLogEntry();
+    flushLogUncond(dummy_le);
+    TL_LogStructure_->Le = dummy_le;
+    TL_LastLogEntry_ = dummy_le;
+//    TL_LastLogEntry_ = TL_FirstLogEntry_;
+    return;
+}
 
 ///
 /// @brief Given a log entry, publish it to other threads
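Reduced to its shape, pruneLogEntries() above detaches the per-thread list, frees every node, and re-seeds the published head with a flushed sentinel so no reader ever follows a pointer into reclaimed memory. A standalone sketch of that pattern (Node, Head, and makeSentinel are illustrative stand-ins; the real code recycles slots through deleteEntry() and flushes the dummy entry before republishing it):

    #include <atomic>

    struct Node { std::atomic<Node*> Next{nullptr}; };
    struct Head { Node *First{nullptr}; };

    Node *makeSentinel() { return new Node; }  // cf. createDummyLogEntry()

    void prune(Head *h) {
        if (!h) return;
        Node *n = h->First;
        h->First = nullptr;            // detach before freeing anything
        while (n) {
            Node *next = n->Next.load(std::memory_order_acquire);
            delete n;                  // the tree recycles the slot instead
            n = next;
        }
        h->First = makeSentinel();     // keep the published head valid
    }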
@@ -249,31 +249,31 @@ void LogMgr::logAlloc(void *addr)
 //    TL_LastLogEntry_ = le;
 }
 
-//void LogMgr::logFree(void *addr)
-//{
-//#ifdef _FORCE_FAIL
-//    fail_program();
-//#endif
-//    // TODO: use the arena lock for log elision
-//    if (tryLogElision(NULL, 0)) return;
-//
-//    LogEntry *le = createAllocationLogEntry(addr, LE_free);
-//
-//#ifndef _NO_NEST
-//    // If there is a previous free, create a happens after link free -> free
-//    // In that case, the following call will set the Size_ of the new
-//    // log entry as well
-//    setHappensBeforeForAllocFree(le);
-//#endif
-//
-//    publishLogEntry(le);
-//
-//#ifndef _NO_NEST
-//    addLogToLastReleaseInfo(le, *new MapOfLockInfo);
-//#endif
-//
-////    TL_LastLogEntry_ = le;
-//}
+void LogMgr::logFree(void *addr)
+{
+#ifdef _FORCE_FAIL
+    fail_program();
+#endif
+    // TODO: use the arena lock for log elision
+    if (tryLogElision(NULL, 0)) return;
+
+    LogEntry *le = createAllocationLogEntry(addr, LE_free);
+
+#ifndef _NO_NEST
+    // If there is a previous free, create a happens-after link free -> free.
+    // In that case, the following call will set the Size_ of the new
+    // log entry as well.
+    setHappensBeforeForAllocFree(le);
+#endif
+
+    publishLogEntry(le);
+
+#ifndef _NO_NEST
+    addLogToLastReleaseInfo(le, *new MapOfLockInfo);
+#endif
+
+//    TL_LastLogEntry_ = le;
+}
 
 } // namespace Atlas
@@ -134,12 +134,12 @@ void nvm_log_alloc(void *addr)
     Atlas::LogMgr::getInstance().logAlloc(addr);
 }
 
-//void nvm_log_free(void *addr)
-//{
-//    if (!Atlas::LogMgr::hasInstance()) return;
-//    Atlas::LogMgr::getInstance().logFree(addr);
-//}
-//
+void nvm_log_free(void *addr)
+{
+    if (!Atlas::LogMgr::hasInstance()) return;
+    Atlas::LogMgr::getInstance().logFree(addr);
+}
+
 void nvm_barrier(void *p)
 {
     if (!NVM_IsInOpenPR(p, 1)) return;
@@ -30,48 +30,48 @@ uint32_t PMallocUtil::CacheLineSize_{UINT32_MAX};
 uintptr_t PMallocUtil::CacheLineMask_{UINTPTR_MAX};
 thread_local uint32_t PMallocUtil::TL_CurrArena_[kMaxNumPRegions_] = {};
 
-/////
-///// Given a pointer to persistent memory, mark the location free and
-///// add it to the free list.
-/////
-//void PArena::freeMem(void *ptr, bool should_log)
-//{
-//#ifdef _FORCE_FAIL
-//    fail_program();
-//#endif
-//    Lock();
-//
-//    if (!PMallocUtil::is_ptr_allocated(ptr))
-//    {
-//        fprintf(stderr, "[Atlas-pheap] assert: %p %ld %ld\n",
-//                ptr, *((size_t *)ptr),
-//                *(size_t *)((char *)ptr+sizeof(size_t)));
-//        assert(PMallocUtil::is_ptr_allocated(ptr) &&
-//               "free called on unallocated memory");
-//    }
-//
-//    char *mem = (char*)PMallocUtil::ptr2mem(ptr);
-//    assert(doesRangeCheck(mem, *(reinterpret_cast<size_t*>(mem))) &&
-//           "Attempt to free memory outside of arena range!");
-//
-//#ifndef _DISABLE_ALLOC_LOGGING
-//    if (should_log) nvm_log_free(mem + sizeof(size_t));
-//#endif
-//
-//    *(size_t*)(mem + sizeof(size_t)) = false;
-//    NVM_FLUSH(mem + sizeof(size_t));
-//
-//    insertToFreeList(PMallocUtil::get_bin_number(
-//                         PMallocUtil::get_requested_alloc_size_from_ptr(ptr)),
-//                     PMallocUtil::ptr2mem(ptr));
-//
-//    decrementActualAllocedStats(
-//        PMallocUtil::get_actual_alloc_size(
-//            PMallocUtil::get_requested_alloc_size_from_ptr(ptr)));
-//
-//    Unlock();
-//}
-//
+///
+/// Given a pointer to persistent memory, mark the location free and
+/// add it to the free list.
+///
+void PArena::freeMem(void *ptr, bool should_log)
+{
+#ifdef _FORCE_FAIL
+    fail_program();
+#endif
+    Lock();
+
+    if (!PMallocUtil::is_ptr_allocated(ptr))
+    {
+        fprintf(stderr, "[Atlas-pheap] assert: %p %ld %ld\n",
+                ptr, *((size_t *)ptr),
+                *(size_t *)((char *)ptr+sizeof(size_t)));
+        assert(PMallocUtil::is_ptr_allocated(ptr) &&
+               "free called on unallocated memory");
+    }
+
+    char *mem = (char*)PMallocUtil::ptr2mem(ptr);
+    assert(doesRangeCheck(mem, *(reinterpret_cast<size_t*>(mem))) &&
+           "Attempt to free memory outside of arena range!");
+
+#ifndef _DISABLE_ALLOC_LOGGING
+    if (should_log) nvm_log_free(mem + sizeof(size_t));
+#endif
+
+    *(size_t*)(mem + sizeof(size_t)) = false;
+    NVM_FLUSH(mem + sizeof(size_t));
+
+    insertToFreeList(PMallocUtil::get_bin_number(
+                         PMallocUtil::get_requested_alloc_size_from_ptr(ptr)),
+                     PMallocUtil::ptr2mem(ptr));
+
+    decrementActualAllocedStats(
+        PMallocUtil::get_actual_alloc_size(
+            PMallocUtil::get_requested_alloc_size_from_ptr(ptr)));
+
+    Unlock();
+}
+
 ///
 /// Given a size, allocate memory using the bump pointer. If it
 /// reaches the end of the arena, return null.
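On the store-plus-flush pair inside freeMem() above: the assignment only updates the cached copy of the allocated flag, and NVM_FLUSH writes the cache line back so the cleared flag actually reaches persistent memory. A minimal x86 sketch of the same idiom, assuming clflush-style persistence (mark_free is illustrative, not tree code):

    #include <immintrin.h>  // _mm_clflush (x86)
    #include <cstddef>

    void mark_free(size_t *is_allocated_word) {
        *is_allocated_word = 0;          // visible to other threads, but only cached
        _mm_clflush(is_allocated_word);  // write the line back toward durable media
    }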
@@ -42,23 +42,23 @@ namespace Atlas {
 PRegionMgr *PRegionMgr::Instance_{nullptr};
 
-/////
-///// Entry point for freeing a persistent location
-/////
-//void PRegionMgr::freeMem(void *ptr, bool should_log) const
-//{
-//#ifdef _FORCE_FAIL
-//    fail_program();
-//#endif
-//    // Correct size unknown at this point since it may be in transient mem
-//    region_id_t rgn_id = getOpenPRegionId(ptr, 1 /* dummy */);
-//    if (rgn_id == kInvalidPRegion_) { // transient memory
-//        free(ptr);
-//        return;
-//    }
-//    freeMemImpl(rgn_id, ptr, should_log);