summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2007-03-26 03:24:36 +0000
committerMichael Sevakis <jethead71@rockbox.org>2007-03-26 03:24:36 +0000
commit165f62d0cd771660e4b8d2ba7475e14d0d6f2e9f (patch)
tree2818158a5c6f250dc928319d7cc6c91788519b78
parent5e2984ad80a732f39b8d0df130176e67b30b9469 (diff)
downloadrockbox-165f62d0cd771660e4b8d2ba7475e14d0d6f2e9f.zip
rockbox-165f62d0cd771660e4b8d2ba7475e14d0d6f2e9f.tar.gz
rockbox-165f62d0cd771660e4b8d2ba7475e14d0d6f2e9f.tar.bz2
rockbox-165f62d0cd771660e4b8d2ba7475e14d0d6f2e9f.tar.xz
Fix a hole in the scheduler where collisions between waking blocked threads in mutexes with interrupts waking blocked threads in message queues can occur. Queue posts will put the threads on a separate list that is then added to the running list with IRQs disabled on the next task switch or CPU wakeup. Basically no overhead for other operations. Seems a likely cause of my occasional observation of the backlight fade causing playback threads to stop running and a recently reported blocking violation upon USB plugging. Time will tell but banging the backlight on and off frequently hasn't hiccuped again for me on H120.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@12915 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--firmware/export/thread.h3
-rw-r--r--firmware/kernel.c7
-rw-r--r--firmware/thread.c67
3 files changed, 70 insertions, 7 deletions
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index c9132af..279ea44 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -119,6 +119,8 @@ struct core_entry {
struct thread_entry threads[MAXTHREADS];
struct thread_entry *running;
struct thread_entry *sleeping;
+ struct thread_entry *waking;
+ struct thread_entry **wakeup_list;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
int switch_to_irq_level;
#define STAY_IRQ_LEVEL -1
@@ -193,6 +195,7 @@ void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
#endif
#endif
void wakeup_thread(struct thread_entry **thread);
+void wakeup_thread_irq_safe(struct thread_entry **thread);
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
diff --git a/firmware/kernel.c b/firmware/kernel.c
index c5e47a8..e09edef 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -126,7 +126,7 @@ static void queue_release_sender(struct thread_entry **sender,
intptr_t retval)
{
(*sender)->retval = retval;
- wakeup_thread(sender);
+ wakeup_thread_irq_safe(sender);
#if 0
/* This should _never_ happen - there must never be multiple
threads in this list and it is a corrupt state */
@@ -289,11 +289,14 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
}
#endif
- wakeup_thread(&q->thread);
+ wakeup_thread_irq_safe(&q->thread);
set_irq_level(oldlevel);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed
+ to use this function - we only aim to protect the queue integrity by
+ turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
diff --git a/firmware/thread.c b/firmware/thread.c
index 8022d94..cbe12b4 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -289,15 +289,56 @@ void check_sleepers(void)
}
}
+/* Safely finish waking all threads potentially woken by interrupts -
+ * statearg already zeroed in wakeup_thread. */
+static void wake_list_awaken(void)
+{
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ /* No need for another check in the IRQ lock since IRQs are allowed
+ only to add threads to the waking list. They won't be adding more
+ until we're done here though. */
+
+ struct thread_entry *waking = cores[CURRENT_CORE].waking;
+ struct thread_entry *running = cores[CURRENT_CORE].running;
+
+ if (running != NULL)
+ {
+ /* Place waking threads at the end of the running list. */
+ struct thread_entry *tmp;
+ waking->prev->next = running;
+ running->prev->next = waking;
+ tmp = running->prev;
+ running->prev = waking->prev;
+ waking->prev = tmp;
+ }
+ else
+ {
+ /* Just transfer the list as-is - just came out of a core
+ * sleep. */
+ cores[CURRENT_CORE].running = waking;
+ }
+
+ /* Done with waking list */
+ cores[CURRENT_CORE].waking = NULL;
+ set_irq_level(oldlevel);
+}
+
static inline void sleep_core(void)
{
static long last_tick = 0;
#if CONFIG_CPU == S3C2440
int i;
#endif
-
+
for (;;)
{
+ /* We want to do these ASAP as it may change the decision to sleep
+ the core or the core has woken because an interrupt occurred
+ and posted a message to a queue. */
+ if (cores[CURRENT_CORE].waking != NULL)
+ wake_list_awaken();
+
if (last_tick != current_tick)
{
check_sleepers();
@@ -397,7 +438,7 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
#ifdef SIMULATOR
/* Do nothing */
#else
-
+
/* Begin task switching by saving our current context so that we can
* restore the state of the current thread later to the point prior
* to this call. */
@@ -593,10 +634,12 @@ void wakeup_thread(struct thread_entry **list)
{
case STATE_BLOCKED:
/* Remove thread from the list of blocked threads and add it
- * to the scheduler's list of running processes. */
+ * to the scheduler's list of running processes. List removal
+ * is safe since each object maintains its own list of
+ * sleepers and queues protect against reentrancy. */
remove_from_list(list, thread);
- add_to_list(&cores[CURRENT_CORE].running, thread);
-
+ add_to_list(cores[CURRENT_CORE].wakeup_list, thread);
+
case STATE_BLOCKED_W_TMO:
/* Just remove the timeout to cause scheduler to immediately
* wake up the thread. */
@@ -610,6 +653,18 @@ void wakeup_thread(struct thread_entry **list)
}
}
+/* Like wakeup_thread but safe against IRQ corruption when IRQs are disabled
+ before calling. */
+void wakeup_thread_irq_safe(struct thread_entry **list)
+{
+ struct core_entry *core = &cores[CURRENT_CORE];
+ /* Switch wakeup lists and call wakeup_thread */
+ core->wakeup_list = &core->waking;
+ wakeup_thread(list);
+ /* Switch back to normal running list */
+ core->wakeup_list = &core->running;
+}
+
/*---------------------------------------------------------------------------
* Create a thread
* If using a dual core architecture, specify which core to start the thread
@@ -794,6 +849,8 @@ void init_threads(void)
memset(cores, 0, sizeof cores);
cores[core].sleeping = NULL;
cores[core].running = NULL;
+ cores[core].waking = NULL;
+ cores[core].wakeup_list = &cores[core].running;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
cores[core].switch_to_irq_level = STAY_IRQ_LEVEL;
#endif