summaryrefslogtreecommitdiff
path: root/lib/workqueue.c
diff options
context:
space:
mode:
authorpaul <paul>2005-11-14 12:07:47 +0000
committerpaul <paul>2005-11-14 12:07:47 +0000
commit190880dc790007a14911ef8c170af33a50a7a674 (patch)
tree8de89e02883f522cd5d730a416ce771fd2f23ed2 /lib/workqueue.c
parent31a5976bb99f1875ebcfc29a6359f3a7ae81a795 (diff)
2005-11-14 Paul Jakma <paul.jakma@sun.com>
* (general) Add state to detect queue floods. There's no sense trying to be sparing of CPU resources if the queue is flooding and using ever more memory resources; we should just get on with clearing the queue. The sense of delay and hold were the wrong way around; fix. * workqueue.h: (struct work_queue) Add status bitfield. Add 'flood' integer to workqueue spec. Add runs_since_clear counter to workqueue. * workqueue.c: (work_queue_new) set defaults for delay, hold and flood. (work_queue_add) initial schedule should use delay, not hold. (show_work_queues) Print flood field, conserve whitespace. (work_queue_unplug) use delay, not hold. (work_queue_run) consecutive runs should be separated by hold time, not delay. Keep track of the number of consecutive runs; go into 'overdrive' if the queue is being flooded. We can't avoid making heavy use of resources, so better to use CPU than ever more RAM.
Diffstat (limited to 'lib/workqueue.c')
-rw-r--r-- lib/workqueue.c | 38
1 file changed, 27 insertions(+), 11 deletions(-)
diff --git a/lib/workqueue.c b/lib/workqueue.c
index bac41302..c2ff10db 100644
--- a/lib/workqueue.c
+++ b/lib/workqueue.c
@@ -80,7 +80,12 @@ work_queue_new (struct thread_master *m, const char *queue_name)
listnode_add (&work_queues, new);
new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
-
+
+ /* Default values, can be overriden by caller */
+ new->spec.delay = WORK_QUEUE_DEFAULT_DELAY;
+ new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
+ new->spec.flood = WORK_QUEUE_DEFAULT_FLOOD;
+
return new;
}
@@ -128,7 +133,7 @@ work_queue_add (struct work_queue *wq, void *data)
item->data = data;
listnode_add (wq->items, item);
- work_queue_schedule (wq, wq->spec.hold);
+ work_queue_schedule (wq, wq->spec.delay);
return;
}
@@ -167,12 +172,12 @@ DEFUN(show_work_queues,
struct work_queue *wq;
vty_out (vty,
- "%c %8s %11s %8s %21s%s",
- ' ', "List","(ms) ","Q. Runs","Cycle Counts ",
+ "%c%c %8s %11s %8s %21s%s",
+ ' ', ' ', "List","(ms) ","Q. Runs","Cycle Counts ",
VTY_NEWLINE);
vty_out (vty,
- "%c %8s %5s %5s %8s %7s %6s %6s %s%s",
- ' ',
+ "%c%c %8s %5s %5s %8s %7s %6s %6s %s%s",
+ 'P', 'F',
"Items",
"Delay","Hold",
"Total",
@@ -182,8 +187,9 @@ DEFUN(show_work_queues,
for (ALL_LIST_ELEMENTS_RO ((&work_queues), node, wq))
{
- vty_out (vty,"%c %8d %5d %5d %8ld %7d %6d %6u %s%s",
+ vty_out (vty,"%c%c %8d %5d %5d %8ld %7d %6d %6u %s%s",
(wq->flags == WQ_PLUGGED ? 'P' : ' '),
+ (wq->runs_since_clear >= wq->spec.flood ? 'F' : ' '),
listcount (wq->items),
wq->spec.delay, wq->spec.hold,
wq->runs,
@@ -220,7 +226,7 @@ work_queue_unplug (struct work_queue *wq)
wq->flags = WQ_UNPLUGGED;
/* if thread isnt already waiting, add one */
- work_queue_schedule (wq, wq->spec.hold);
+ work_queue_schedule (wq, wq->spec.delay);
}
/* timer thread to process a work queue
@@ -364,9 +370,19 @@ stats:
/* Is the queue done yet? If it is, call the completion callback. */
if (listcount (wq->items) > 0)
- work_queue_schedule (wq, wq->spec.delay);
- else if (wq->spec.completion_func)
- wq->spec.completion_func (wq);
+ {
+ if (++(wq->runs_since_clear) < wq->spec.flood)
+ work_queue_schedule (wq, wq->spec.hold);
+ else
+ work_queue_schedule (wq, 0); /* queue flooded, go into overdrive */
+ }
+ else
+ {
+ wq->runs_since_clear = 0;
+
+ if (wq->spec.completion_func)
+ wq->spec.completion_func (wq);
+ }
return 0;
}