Commit 01d84d7f authored by Martin Karsten

- clerical updates

- remove numerous fastpath/slowpath designations -> impact not clear
parent 028c6fe9
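
For context: the fastpath()/slowpath() designations removed below are branch-likelihood annotations used throughout the code. Their definitions are not part of this diff; a minimal, hypothetical sketch of what such macros typically look like (assuming they wrap GCC/Clang's __builtin_expect, as the "if slowpath(cond)" call syntax suggests) would be:

  // hypothetical reconstruction -- the actual definitions are not shown in this commit
  #define fastpath(x) (__builtin_expect(!!(x), 1)) // condition expected to be true
  #define slowpath(x) (__builtin_expect(!!(x), 0)) // condition expected to be false

With the annotations removed, the resulting plain if (...) statements leave branch layout to the compiler's own heuristics, which matches the commit message's observation that the impact of the hints is not clear.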
@@ -111,7 +111,7 @@ public:
GENASSERT1(!empty(), FmtHex(this));
T* last = head;
for (size_t i = 1; i < count; i += 1) {
-if slowpath(last->link[NUM].next == nullptr) count = i; // breaks loop and sets count
+if (last->link[NUM].next == nullptr) count = i; // breaks loop and sets count
else last = last->link[NUM].next;
}
head = last->link[NUM].next;
@@ -189,7 +189,7 @@ public:
GENASSERT1(!empty(), FmtHex(this));
T* last = head;
for (size_t i = 1; i < count; i += 1) {
-if slowpath(last->link[NUM].next == nullptr) count = i; // breaks loop and sets count
+if (last->link[NUM].next == nullptr) count = i; // breaks loop and sets count
else last = last->link[NUM].next;
}
head = last->link[NUM].next;
@@ -318,7 +318,7 @@ public:
GENASSERT1(test(first), FmtHex(&first));
T* last = &first;
for (size_t i = 1; i < count; i += 1) {
-if slowpath(last->link[NUM].next == edge()) count = i; // breaks loop and sets count
+if (last->link[NUM].next == edge()) count = i; // breaks loop and sets count
else last = last->link[NUM].next;
}
return remove(first, *last);
@@ -36,10 +36,6 @@ struct Region {
static T error() { return limit<T>(); }
};
-/* Note: The code below uses a lot of conditionals. The fastpath case is
-* used for non-empty set where insertions might need to be merged with
-* adjacent regions, but partial overlaps generally do not exist.
-*/
template<typename R, typename A = std::allocator<R>>
class RegionSet : public std::set<R,std::less<R>,A> {
using baseclass = std::set<R,std::less<R>,A>;
@@ -50,19 +46,19 @@ public:
R insert( R r ) {
// lower_bound finds lowest overlapping or adjacent/same-type region
iterator it = baseclass::lower_bound(r);
-if slowpath(it == baseclass::end()) goto insert_now;
+if (it == baseclass::end()) goto insert_now;
// if first region overlaps: merge
-if slowpath(it->start < r.start) r.start = it->start;
+if (it->start < r.start) r.start = it->start;
// remove all regions that are fully covered by inserted region
while (it->end <= r.end) {
it = baseclass::erase(it);
-if slowpath(it == baseclass::end()) goto insert_now;
+if (it == baseclass::end()) goto insert_now;
}
// if last region overlaps: merge
-if slowpath(it->start <= r.end) {
+if (it->start <= r.end) {
r.end = it->end;
it = baseclass::erase(it);
}
@@ -75,11 +71,11 @@ insert_now:
template<bool fault=false>
bool remove( const R& r ) {
iterator it = baseclass::lower_bound(r);
-if fastpath(it != baseclass::end() && it->start <= r.start && it->end >= r.end) {
+if (it != baseclass::end() && it->start <= r.start && it->end >= r.end) {
R t = *it;
it = baseclass::erase(it); // it points to next, insert back, then front!
-if slowpath(t.end > r.end) it = baseclass::emplace_hint(it, r.end, t.end);
-if slowpath(t.start < r.start) baseclass::emplace_hint(it, t.start, r.start);
+if (t.end > r.end) it = baseclass::emplace_hint(it, r.end, t.end);
+if (t.start < r.start) baseclass::emplace_hint(it, t.start, r.start);
return true;
}
GENASSERT(fault);
@@ -90,7 +86,7 @@ insert_now:
size_t retrieve_front(size_t s) {
for (auto it = baseclass::begin(); it != baseclass::end(); ++it) {
size_t astart = align_up(it->start, s);
-if fastpath(it->end >= astart + s) {
+if (it->end >= astart + s) {
remove( R(astart, astart + s) );
return astart;
}
@@ -103,7 +99,7 @@ insert_now:
size_t retrieve_back(size_t s) {
for (auto it = baseclass::rbegin(); it != baseclass::rend(); ++it) {
size_t aend = align_down(it->end, s);
-if fastpath(it->start <= aend - s) {
+if (it->start <= aend - s) {
remove( R(aend - s, aend) );
return aend - s;
}
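
The comment block removed above described RegionSet's common case: a non-empty set where an inserted region may need to merge with overlapping or adjacent entries. A small, hypothetical usage sketch of that coalescing behaviour (the Region template signature and the concrete addresses/sizes are assumptions for illustration):

  // hypothetical usage sketch; Region<T> is assumed to hold a [start,end) range as used by insert()/remove()
  RegionSet<Region<size_t>> free;
  free.insert(Region<size_t>(0x1000, 0x2000)); // set holds [0x1000,0x2000)
  free.insert(Region<size_t>(0x2000, 0x3000)); // adjacent: coalesced into [0x1000,0x3000)
  free.remove(Region<size_t>(0x1800, 0x2800)); // carves a hole: [0x1000,0x1800) and [0x2800,0x3000)
  size_t a = free.retrieve_front(0x800);       // removes [0x1000,0x1800) and returns 0x1000

insert() merges the new range with any overlapping or adjacent entries, remove() may split an existing entry into a front and a back piece, and retrieve_front()/retrieve_back() carve a suitably aligned block out of the first or last entry large enough to hold it.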
@@ -41,7 +41,7 @@ public:
void acquire() {
size_t spin = SpinStart;
for (;;) {
-if fastpath(!__atomic_test_and_set(&locked, __ATOMIC_SEQ_CST)) break;
+if (!__atomic_test_and_set(&locked, __ATOMIC_SEQ_CST)) break;
for (size_t i = 0; i < spin; i += 1) Pause();
if (spin < SpinEnd) spin += spin;
while (locked) Pause();
@@ -65,7 +65,7 @@ public:
size_t tryAcquire(T caller) {
if (owner != caller) {
if (owner != noOwner) return 0;
-if slowpath(!_CAS((T*)&owner, noOwner, caller)) return 0;
+if (!_CAS((T*)&owner, noOwner, caller)) return 0;
}
counter += 1;
return counter;
@@ -74,7 +74,7 @@ public:
if (owner != caller) {
size_t spin = SpinStart;
for (;;) {
-if fastpath(_CAS((T*)&owner, noOwner, caller)) break;
+if (_CAS((T*)&owner, noOwner, caller)) break;
for (size_t i = 0; i < spin; i += 1) Pause();
if (spin < SpinEnd) spin += spin;
while (owner != noOwner) Pause();
@@ -49,11 +49,11 @@ public:
void ticker() {
currTick += 1;
currTime.tv_nsec += tickNSEC;
-if slowpath(currTime.tv_nsec >= tickSEC) {
+if (currTime.tv_nsec >= tickSEC) {
currTime.tv_sec += 1;
currTime.tv_nsec = 0;
correction += errorNom;
-while slowpath(correction > errorDenom * hz) {
+while (correction > errorDenom * hz) {
correction -= errorDenom * hz;
currTick += 1;
currTime.tv_nsec += tickNSEC;
@@ -197,7 +197,7 @@ void Process::exit(int result) {
tcbLock.acquire();
state = Exiting;
for (size_t i = 0; i < tcbStore.currentIndex(); i += 1) {
-if fastpath(tcbStore.valid(i)) {
+if (tcbStore.valid(i)) {
tcbStore.access(i).getRunner()->sigPending |= SIGTERM;
tcbStore.access(i).detach();
}
@@ -20,8 +20,8 @@
#include <limits.h> // PTHREAD_STACK_MIN
// instance definitions for Context members
-thread_local SystemProcessor* volatile Context::currProc; // lfbasics.h
-thread_local StackContext* volatile Context::currStack; // lfbasics.h
+thread_local SystemProcessor* volatile Context::currProc = nullptr; // lfbasics.h
+thread_local StackContext* volatile Context::currStack = nullptr; // lfbasics.h
// noinline routines for Context
void Context::setCurrStack(StackContext& s, _friend<Runtime>) { currStack = &s; }
@@ -52,7 +52,7 @@ inline bool BaseProcessor::tryDequeue(StackContext*& s) {
#else
s = readyQueue.dequeue();
#endif
-if fastpath(s) {
+if (s) {
stats->deq.count();
return true;
}
@@ -61,7 +61,7 @@ inline bool BaseProcessor::tryDequeue(StackContext*& s) {
inline bool BaseProcessor::tryStage(StackContext*& s) {
s = cluster.stage();
-if slowpath(s) { // staging expected to happen rarely
+if (s) {
stats->stage.count();
s->changeResumeProcessor(*this, _friend<BaseProcessor>());
return true;
@@ -72,7 +72,7 @@ inline bool BaseProcessor::tryStage(StackContext*& s) {
inline bool BaseProcessor::trySteal(StackContext*& s) {
#if TESTING_WORK_STEALING
s = cluster.steal();
-if fastpath(s) {
+if (s) {
#if TESTING_WORK_STEALING_STICKY
static const size_t stickyStealThreshold = TESTING_WORK_STEALING_STICKY;
if (s->getResumeProcessor().load() > stickyStealThreshold) {
@@ -89,7 +89,7 @@ inline bool BaseProcessor::trySteal(StackContext*& s) {
inline bool BaseProcessor::tryBorrow(StackContext*& s) {
s = cluster.borrow();
-if fastpath(s) {
+if (s) {
stats->borrow.count();
return true;
}
@@ -98,10 +98,10 @@ inline bool BaseProcessor::tryBorrow(StackContext*& s) {
StackContext* BaseProcessor::schedule(_friend<StackContext>) {
StackContext* nextStack;
-if fastpath(tryDequeue(nextStack)) return nextStack;
-if slowpath(terminate) return idleStack;
-if slowpath(tryStage(nextStack)) return nextStack;
-if fastpath(trySteal(nextStack)) return nextStack;
-if fastpath(tryBorrow(nextStack)) return nextStack;
+if (tryDequeue(nextStack)) return nextStack;
+if (terminate) return idleStack;
+if (tryStage(nextStack)) return nextStack;
+if (trySteal(nextStack)) return nextStack;
+if (tryBorrow(nextStack)) return nextStack;
return idleStack;
}
@@ -389,7 +389,7 @@ protected:
bool internalAcquire(bool wait, const Time& timeout = Time::zero()) {
StackContext* cs = CurrStack();
lock.acquire();
-if fastpath(!owner) {
+if (!owner) {
owner = cs;
} else if (owner == cs) {
GENASSERT1(OwnerLock, FmtHex(owner));
@@ -75,9 +75,9 @@ public:
}
VirtualProcessor& placement(_friend<StackContext>, bool bg = false, bool sg = false) {
-if slowpath(bg) return backgroundProc;
+if (bg) return backgroundProc;
#if TESTING_PLACEMENT_RR
-if slowpath(sg) return stagingProc;
+if (sg) return stagingProc;
ScopedLock<SystemLock> sl(ringLock);
GENASSERT(ringProc);
ringProc = ProcessorRing::next(*ringProc);
@@ -96,7 +96,7 @@ public:
BaseProcessor* p = busyList.front();
while (p != busyList.edge()) {
StackContext* s = p->dequeue<true>(_friend<Cluster>());
-if fastpath(s) return s;
+if (s) return s;
p = ProcessorList::next(*p);
}
return nullptr;
@@ -19,8 +19,8 @@
#define TESTING_NEMESIS_READYQUEUE 1 // vs. stub-based MPSC
//#define TESTING_BLOCKING_CONCURRENT 1 // using MPSC blocking semantics (no prio levels)
//#define TESTING_IDLE_SPIN 128 // spin before idle/halt threshold
-//#define TESTING_MUTEX_FIFO 1 // try fifo/baton mutex
-//#define TESTING_MUTEX_SPIN 1 // spin before block in Mutex class
+//#define TESTING_MUTEX_FIFO 1 // use fifo/baton mutex
+//#define TESTING_MUTEX_SPIN 1 // spin before block in non-fifo mutex
//#define TESTING_PLACEMENT_RR 1 // RR placement, instead of load-based
#define TESTING_WORK_STEALING 1 // enable work stealing (default transient)
#define TESTING_WORK_STEALING_STICKY 5 // sticky work stealing load threshold