author     Paul Jakma <paul@quagga.net>    2010-01-11 16:33:07 +0000
committer  Paul Jakma <paul@quagga.net>    2010-12-08 16:53:09 +0000
commit     2613abe64fe48761d798942af8dc0ec90c804b22 (patch)
tree       c0f75f40bbd005bdd9d5545209a9402715c4f381 /lib/thread.c
parent     8526100eebf9c56ff6fac2b80938b232bb687946 (diff)
lib: Thread scheduler should be fair and not let events starve I/O and timers
* thread.c: (thread_fetch) The current scheduler services events
  indefinitely, ignoring I/O and timers, for as long as there are
  events; in other words, events can crowd out I/O and timers. In
  theory this shouldn't be a huge problem, as events are generated
  only by timers and I/O, but in practice it means normal-load
  behaviour is not as useful a predictor of high-load behaviour as it
  should be.
  Fix this by considering all the kinds of threads in every run of
  the scheduler. Within any given run we still prioritise events, but
  across runs the scheduler is fair. In testing with ospfd this has
  been observed to give more stable inter-packet times (i.e. lower
  std-dev).
  (thread_process) New helper to queue all the given threads onto the
  ready list.
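For illustration, a minimal standalone sketch of the new helper follows. It is not Quagga code as-is: the types are pared down from lib/thread.h, THREAD_READY is a stand-in constant, and thread_list_add/thread_list_delete are modelled on the helpers in lib/thread.c. The loop takes list->head on every iteration, because deleting a node unlinks it from the list (so chasing thread->next after the delete would be wrong).

/* sketch: move every thread on a list onto its master's ready list */
#include <stdio.h>

#define THREAD_READY 1 /* stand-in for the real thread type */

struct thread;

struct thread_list
{
  struct thread *head;
  struct thread *tail;
  unsigned int count;
};

struct thread_master
{
  struct thread_list ready;
};

struct thread
{
  struct thread *next;
  struct thread *prev;
  struct thread_master *master;
  int type;
};

/* append to tail, modelled on lib/thread.c's thread_list_add */
static void
thread_list_add (struct thread_list *list, struct thread *thread)
{
  thread->next = NULL;
  thread->prev = list->tail;
  if (list->tail)
    list->tail->next = thread;
  else
    list->head = thread;
  list->tail = thread;
  list->count++;
}

/* unlink, modelled on lib/thread.c's thread_list_delete */
static struct thread *
thread_list_delete (struct thread_list *list, struct thread *thread)
{
  if (thread->next)
    thread->next->prev = thread->prev;
  else
    list->tail = thread->prev;
  if (thread->prev)
    thread->prev->next = thread->next;
  else
    list->head = thread->next;
  thread->next = thread->prev = NULL;
  list->count--;
  return thread;
}

/* the helper: queue all threads on list onto the ready list */
static unsigned int
thread_process (struct thread_list *list)
{
  struct thread *thread;
  unsigned int ready = 0;

  for (thread = list->head; thread; thread = list->head)
    {
      thread_list_delete (list, thread); /* advances list->head */
      thread->type = THREAD_READY;
      thread_list_add (&thread->master->ready, thread);
      ready++;
    }
  return ready;
}

int
main (void)
{
  struct thread_master m = { { NULL, NULL, 0 } };
  struct thread_list event = { NULL, NULL, 0 };
  struct thread t[3] = { { NULL, NULL, NULL, 0 } };
  int i;

  for (i = 0; i < 3; i++)
    {
      t[i].master = &m;
      thread_list_add (&event, &t[i]);
    }
  printf ("moved %u; event count %u, ready count %u\n",
          thread_process (&event), event.count, m.ready.count);
  return 0;
}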
Diffstat (limited to 'lib/thread.c')
-rw-r--r--  lib/thread.c  57
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/lib/thread.c b/lib/thread.c
index af52b057..fb6fe375 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -903,6 +903,24 @@ thread_timer_process (struct thread_list *list, struct timeval *timenow)
   return ready;
 }
 
+/* process a list en masse, e.g. for event thread lists */
+static unsigned int
+thread_process (struct thread_list *list)
+{
+  struct thread *thread;
+  unsigned int ready = 0;
+
+  for (thread = list->head; thread; thread = list->head)
+    {
+      thread_list_delete (list, thread); /* deleting the head advances list->head */
+      thread->type = THREAD_READY;
+      thread_list_add (&thread->master->ready, thread);
+      ready++;
+    }
+  return ready;
+}
+
+
 /* Fetch next ready thread. */
 struct thread *
 thread_fetch (struct thread_master *m, struct thread *fetch)
@@ -911,41 +929,48 @@ thread_fetch (struct thread_master *m, struct thread *fetch)
   fd_set readfd;
   fd_set writefd;
   fd_set exceptfd;
-  struct timeval timer_val;
+  struct timeval timer_val = { .tv_sec = 0, .tv_usec = 0 };
   struct timeval timer_val_bg;
-  struct timeval *timer_wait;
+  struct timeval *timer_wait = &timer_val;
   struct timeval *timer_wait_bg;
   while (1)
     {
       int num = 0;
 
-      /* Signals are highest priority */
+      /* Signals pre-empt everything */
       quagga_sigevent_process ();
-      /* Normal event are the next highest priority. */
-      if ((thread = thread_trim_head (&m->event)) != NULL)
-        return thread_run (m, thread, fetch);
-
-      /* If there are any ready threads from previous scheduler runs,
-       * process top of them.
+      /* Drain the ready queue of already scheduled jobs, before scheduling
+       * more.
        */
       if ((thread = thread_trim_head (&m->ready)) != NULL)
         return thread_run (m, thread, fetch);
 
+      /* To be fair to all kinds of threads, and avoid starvation, we
+       * need to be careful to consider all thread types for scheduling
+       * in each quantum. I.e. we should not return early from here on.
+       */
+
+      /* Normal events are the next highest priority. */
+      thread_process (&m->event);
+
       /* Structure copy. */
       readfd = m->readfd;
       writefd = m->writefd;
       exceptfd = m->exceptfd;
 
       /* Calculate select wait timer if nothing else to do */
-      quagga_get_relative (NULL);
-      timer_wait = thread_timer_wait (&m->timer, &timer_val);
-      timer_wait_bg = thread_timer_wait (&m->background, &timer_val_bg);
-
-      if (timer_wait_bg &&
-          (!timer_wait || (timeval_cmp (*timer_wait, *timer_wait_bg) > 0)))
-        timer_wait = timer_wait_bg;
+      if (m->ready.count == 0)
+        {
+          quagga_get_relative (NULL);
+          timer_wait = thread_timer_wait (&m->timer, &timer_val);
+          timer_wait_bg = thread_timer_wait (&m->background, &timer_val_bg);
+
+          if (timer_wait_bg &&
+              (!timer_wait || (timeval_cmp (*timer_wait, *timer_wait_bg) > 0)))
+            timer_wait = timer_wait_bg;
+        }
 
       num = select (FD_SETSIZE, &readfd, &writefd, &exceptfd, timer_wait);
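
The select() call is where the zero-initialised timer_wait pays off. Per select(2), a NULL timeout pointer blocks indefinitely, while a pointer to a zeroed struct timeval merely polls; so whenever jobs are already queued on the ready list, the scheduler now collects any pending I/O without sleeping, and only computes a real (or NULL, i.e. block-until-I/O) timeout when there is nothing ready to run. A small standalone demo of that timeout convention follows (the pipe is illustrative only, chosen because its read end never becomes readable):

/* demo of select(2) timeouts: zeroed timeval polls, longer timeval sleeps */
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int
main (void)
{
  fd_set readfd;
  struct timeval poll_tv = { .tv_sec = 0, .tv_usec = 0 };
  struct timeval wait_tv = { .tv_sec = 1, .tv_usec = 0 };
  int fds[2];

  /* a pipe nothing ever writes to, so its read end is never ready */
  if (pipe (fds) < 0)
    return 1;

  FD_ZERO (&readfd);
  FD_SET (fds[0], &readfd);
  /* zeroed timeout: poll and return immediately (the patch's default) */
  printf ("poll: %d fds ready\n",
          select (fds[0] + 1, &readfd, NULL, NULL, &poll_tv));

  FD_ZERO (&readfd);
  FD_SET (fds[0], &readfd);
  /* non-zero timeout: sleep up to 1s; a NULL timeout pointer would
   * block indefinitely, which is what a NULL thread_timer_wait result
   * means in thread_fetch */
  printf ("timed wait: %d fds ready\n",
          select (fds[0] + 1, &readfd, NULL, NULL, &wait_tv));

  close (fds[0]);
  close (fds[1]);
  return 0;
}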