Commit 04218d80 authored by Martin Karsten

- clerical updates

parent ae6480f2
Pipeline #40286 passed with stage in 3 minutes and 31 seconds
# Untracked Files
# Untracked Directories
html
doc/html
ifeq ($(CC),clang)
CXX=clang++
else
endif
# this test works for clang invoked as 'cc' on FreeBSD
ifeq ($(shell $(CC) --version|head -1|fgrep -q clang; echo $$?),0)
TLSFLAGS=-fno-extern-tls-init -mtls-dialect=gnu2
endif
@@ -50,8 +50,13 @@ static_assert(sizeof(DebugOptions)/sizeof(char*) == DBG::Level::MaxLevel, "debug
static std::atomic<int> initCounter(0);
#if TESTING_ENABLE_STATISTICS
static std::ios ioFormatFlags(NULL);
#endif
static void FibreCleanup() {
#if TESTING_ENABLE_STATISTICS
std::cout.copyfmt(ioFormatFlags);
StatsObject::printAll(std::cout);
delete StatsObject::lst;
#endif
@@ -60,6 +65,7 @@ static void FibreCleanup() {
EventScope* FibreInit(size_t pollerCount, size_t workerCount) {
if (++initCounter == 1) {
#if TESTING_ENABLE_STATISTICS
ioFormatFlags.copyfmt(std::cout);
StatsObject::lst = new IntrusiveQueue<StatsObject>;
#endif
// register cleanup routine
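
FibreInit snapshots std::cout's formatting state into ioFormatFlags, and FibreCleanup restores it before the statistics are printed, so flags left behind by user output cannot distort the stats. A minimal, self-contained sketch of this copyfmt save/restore idiom (illustrative names, not libfibre code):

#include <iostream>
#include <iomanip>

int main() {
  std::ios savedFormat(NULL);      // format-state holder, not attached to any buffer
  savedFormat.copyfmt(std::cout);  // snapshot flags, precision, fill, etc.

  std::cout << std::hex << std::setw(8) << std::setfill('0') << 48879 << std::endl;

  std::cout.copyfmt(savedFormat);  // restore the snapshot
  std::cout << 48879 << std::endl; // prints in decimal again
  return 0;
}
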
@@ -93,7 +93,9 @@ class EventScope {
void initIO() {
struct rlimit rl;
SYSCALL(getrlimit(RLIMIT_NOFILE, &rl)); // get hard limit for file descriptor count
fdCount = rl.rlim_max + MasterPoller::extraTimerFD;
rl.rlim_max = rl.rlim_cur; // firm up current FD limit
SYSCALL(setrlimit(RLIMIT_NOFILE, &rl)); // and install maximum
fdCount = rl.rlim_max + MasterPoller::extraTimerFD; // add fake timer fd, if necessary
fdSyncVector = new SyncFD[fdCount]; // create vector of R/W sync points
masterPoller = new MasterPoller(*this, fdCount, _friend<EventScope>()); // start master poller & timer handling
mainCluster->startPolling(_friend<EventScope>()); // start polling now (potentially new event scope)
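
The reordering above pins the hard RLIMIT_NOFILE value down to the current soft limit before computing fdCount, so the per-fd sync vector is sized by the number of descriptors the process can actually open rather than by a potentially huge hard limit. A hedged sketch of the same sequence outside the runtime:

#include <sys/resource.h>
#include <cstdio>

int main() {
  struct rlimit rl;
  if (getrlimit(RLIMIT_NOFILE, &rl) < 0) { perror("getrlimit"); return 1; }
  rl.rlim_max = rl.rlim_cur;   // firm up: hard limit = current soft limit
  if (setrlimit(RLIMIT_NOFILE, &rl) < 0) { perror("setrlimit"); return 1; }
  // a per-fd table of rl.rlim_max entries now covers every possible descriptor
  printf("fd table size: %llu\n", (unsigned long long)rl.rlim_max);
  return 0;
}
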
@@ -123,7 +123,11 @@ public:
epoll_event ev;
ev.events = EPOLLET | status; // man 2 epoll_ctl: EPOLLERR, EPOLLHUP not needed
ev.data.fd = fd;
SYSCALL(epoll_ctl(pollFD, change ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev));
if (change) {
SYSCALL(epoll_ctl(pollFD, EPOLL_CTL_MOD, fd, &ev));
} else {
SYSCALL(epoll_ctl(pollFD, EPOLL_CTL_ADD, fd, &ev));
}
#endif
}
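
The ternary operand inside the SYSCALL macro is unfolded into an explicit branch; behind it is the epoll contract that an fd is registered once with EPOLL_CTL_ADD (EEXIST if repeated) and updated with EPOLL_CTL_MOD afterwards. A minimal sketch of that contract, assuming a fresh epoll instance and a pipe:

#include <sys/epoll.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int ep = epoll_create1(0);
  int fds[2];
  if (ep < 0 || pipe(fds) < 0) { perror("setup"); return 1; }

  epoll_event ev;
  ev.events = EPOLLIN | EPOLLET;              // first registration: ADD
  ev.data.fd = fds[0];
  if (epoll_ctl(ep, EPOLL_CTL_ADD, fds[0], &ev) < 0) { perror("ADD"); return 1; }

  ev.events = EPOLLIN | EPOLLOUT | EPOLLET;   // already registered: MOD, not ADD
  if (epoll_ctl(ep, EPOLL_CTL_MOD, fds[0], &ev) < 0) { perror("MOD"); return 1; }

  close(fds[0]); close(fds[1]); close(ep);
  return 0;
}
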
@@ -226,7 +230,11 @@ public:
epoll_event ev;
ev.events = EPOLLIN | EPOLLONESHOT;
ev.data.fd = fd;
SYSCALL(epoll_ctl(pollFD, change ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev));
if (change) {
SYSCALL(epoll_ctl(pollFD, EPOLL_CTL_MOD, fd, &ev));
} else {
SYSCALL(epoll_ctl(pollFD, EPOLL_CTL_ADD, fd, &ev));
}
#endif
}
};
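
This variant registers with EPOLLONESHOT, so the fd is disarmed after each delivered event and must be re-armed; the change flag therefore selects ADD for the first registration and MOD for every later re-arm. A short sketch of that re-arm step (helper name and setup are assumed, not libfibre code):

#include <sys/epoll.h>

// Arm or re-arm a oneshot read interest; 'first' is true only on initial registration.
int armOneshot(int ep, int fd, bool first) {
  epoll_event ev;
  ev.events = EPOLLIN | EPOLLONESHOT;  // disarmed again after the next event fires
  ev.data.fd = fd;
  return epoll_ctl(ep, first ? EPOLL_CTL_ADD : EPOLL_CTL_MOD, fd, &ev);
}
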
@@ -103,29 +103,37 @@ extern "C" void invokeStack(funcvoid3_t func, ptr_t arg1, ptr_t arg2, ptr_t arg3
inline void StackContext::yieldTo(StackContext& nextStack) {
CHECK_PREEMPTION(1); // expect preemption still enabled
RuntimeDisablePreemption();
Context::CurrStack()->switchStack<Yield>(nextStack);
switchStack<Yield>(nextStack);
RuntimeEnablePreemption();
}
inline void StackContext::yieldResume(StackContext& nextStack) {
CHECK_PREEMPTION(1); // expect preemption still enabled
RuntimeDisablePreemption();
Context::CurrStack()->switchStack<Resume>(nextStack);
switchStack<Resume>(nextStack);
RuntimeEnablePreemption();
}
inline void StackContext::yieldForce() {
yieldResume(Context::CurrProcessor().scheduleFull(_friend<StackContext>()));
}
bool StackContext::yield() {
StackContext* nextStack = Context::CurrProcessor().scheduleYield(_friend<StackContext>());
if (nextStack) yieldTo(*nextStack);
if (nextStack) Context::CurrStack()->yieldTo(*nextStack);
return nextStack;
}
bool StackContext::yieldGlobal() {
StackContext* nextStack = Context::CurrProcessor().scheduleYieldGlobal(_friend<StackContext>());
if (nextStack) yieldTo(*nextStack);
if (nextStack) Context::CurrStack()->yieldTo(*nextStack);
return nextStack;
}
void StackContext::forceYield() {
Context::CurrStack()->yieldForce();
}
void StackContext::idleYieldTo(StackContext& nextStack, _friend<BaseProcessor>) {
CHECK_PREEMPTION(1); // expect preemption still enabled
RuntimeDisablePreemption();
@@ -160,7 +168,7 @@ void StackContext::migrateNow(BaseProcessor& proc) {
StackContext* sc = Context::CurrStack();
sc->affinity = false;
sc->processor = &proc;
sc->yieldResume(Context::CurrProcessor().scheduleFull(_friend<StackContext>()));
sc->yieldForce();
}
// migrate to scheduler (for disk I/O), don't change affinity
@@ -168,7 +176,7 @@ BaseProcessor& StackContext::migrateNow(Scheduler& scheduler, _friend<EventScope
StackContext* sc = Context::CurrStack();
BaseProcessor* proc = sc->processor;
sc->processor = &scheduler.placement(_friend<StackContext>(), true);
sc->yieldResume(Context::CurrProcessor().scheduleFull(_friend<StackContext>()));
sc->yieldForce();
return *proc;
}
@@ -176,5 +184,5 @@ BaseProcessor& StackContext::migrateNow(Scheduler& scheduler, _friend<EventScope
void StackContext::migrateNow(BaseProcessor& proc, _friend<EventScope>) {
StackContext* sc = Context::CurrStack();
sc->processor = &proc;
sc->yieldResume(Context::CurrProcessor().scheduleFull(_friend<StackContext>()));
sc->yieldForce();
}
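
All three migrateNow variants previously repeated the schedule-and-resume sequence inline; the new yieldForce helper centralizes it, and the static forceYield entry point resolves the current stack once before delegating. A simplified stand-in for that static-to-instance delegation pattern (illustrative names only, not the libfibre declarations):

#include <cstdio>

class Stack {
  static Stack*& current() { static Stack* cur = nullptr; return cur; }
  void yieldForce() { puts("schedule full yield, resume later"); }  // shared helper
public:
  static void setCurrent(Stack* s) { current() = s; }
  static void forceYield() { current()->yieldForce(); }  // static -> instance, cf. above
};

int main() {
  Stack s;
  Stack::setCurrent(&s);
  Stack::forceYield();  // one entry point replaces three duplicated call sequences
  return 0;
}
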
@@ -91,8 +91,9 @@ class StackContext : public DoubleLink<StackContext,StackLinkCount> {
void suspendInternal();
void resumeInternal();
void resumeDirect();
static inline void yieldTo(StackContext& nextStack);
static inline void yieldResume(StackContext& nextStack);
inline void yieldTo(StackContext& nextStack);
inline void yieldResume(StackContext& nextStack);
inline void yieldForce();
protected:
// constructor/destructors can only be called by derived classes
@@ -127,6 +128,7 @@ public:
// context switching - static -> apply to Context::CurrStack()
static bool yield();
static bool yieldGlobal();
static void forceYield();
static void idleYieldTo(StackContext& nextStack, _friend<BaseProcessor>);
static void preempt();
static void terminate() __noreturn;