path: root/firmware/kernel/thread.c
Diffstat (limited to 'firmware/kernel/thread.c')
-rw-r--r--  firmware/kernel/thread.c  1837
1 file changed, 653 insertions(+), 1184 deletions(-)
diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c
index c148f6b..b916c3b 100644
--- a/firmware/kernel/thread.c
+++ b/firmware/kernel/thread.c
@@ -37,11 +37,6 @@
#endif
#include "core_alloc.h"
-/****************************************************************************
- * ATTENTION!! *
- * See notes below on implementing processor-specific portions! *
- ***************************************************************************/
-
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
@@ -49,7 +44,11 @@
#define THREAD_EXTRA_CHECKS 0
#endif
-/**
+/****************************************************************************
+ * ATTENTION!! *
+ * See notes below on implementing processor-specific portions! *
+ ****************************************************************************
+ *
* General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
* perfectly legal.
@@ -66,14 +65,14 @@
* unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
* interrupt handlers.
- *
+ *
* 2) Kernel Object
* 1) May be needed beforehand if the kernel object allows dual-use such as
* event queues. The kernel object must have a scheme to protect itself from
* access by another processor and is responsible for serializing the calls
- * to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
- * other. Objects' queues are also protected here.
- *
+ * to block_thread and wakeup_thread both to themselves and to each other.
+ * Objects' queues are also protected here.
+ *
* 3) Thread Slot
* This locks access to the thread's slot such that its state cannot be
* altered by another processor when a state change is in progress such as
@@ -121,68 +120,62 @@
 * available then some careful non-blocking synchronization is needed (as on
* PP targets at the moment).
*---------------------------------------------------------------------------
+ *
+ *
+ *---------------------------------------------------------------------------
+ * Priority distribution structure (one category for each possible priority):
+ *
+ * +----+----+----+ ... +------+
+ * hist: | F0 | F1 | F2 | | Fn-1 |
+ * +----+----+----+ ... +------+
+ * mask: | b0 | b1 | b2 | | bn-1 |
+ * +----+----+----+ ... +------+
+ *
+ * F = count of threads at priority category n (frequency)
+ * b = bitmask of non-zero priority categories (occupancy)
+ *
+ *        / if F[n] != 0 : 1
+ * b[n] = |
+ *        \ else         : 0
+ *
+ *---------------------------------------------------------------------------
+ * Basic priority inheritance protocol (PIP):
+ *
+ * Mn = mutex n, Tn = thread n
+ *
+ * A lower priority thread inherits the priority of the highest priority
+ * thread blocked waiting for it to complete an action (such as release a
+ * mutex or respond to a message via queue_send):
+ *
+ * 1) T2->M1->T1
+ *
+ * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
+ * priority than T1 then T1 inherits the priority of T2.
+ *
+ * 2) T3
+ * \/
+ * T2->M1->T1
+ *
+ * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
+ * T1 inherits the higher of T2 and T3.
+ *
+ * 3) T3->M2->T2->M1->T1
+ *
+ * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
+ * then T1 inherits the priority of T3 through T2.
+ *
+ * Blocking chains can grow arbitrarily complex (though it's best that they
+ * not form at all very often :) and build up from these units.
+ *---------------------------------------------------------------------------
*/
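A rough standalone sketch of the hist/mask bookkeeping described above (the names and the fixed 32-entry table are illustrative; the file itself uses struct priority_distribution and the priobit_* helpers):

    #include <stdint.h>

    #define NUM_PRIO 32

    struct prio_dist_sketch {
        unsigned int hist[NUM_PRIO]; /* F[n]: number of threads at priority n */
        uint32_t     mask;           /* b[n]: set iff hist[n] != 0            */
    };

    static void dist_add(struct prio_dist_sketch *pd, int prio)
    {
        if (++pd->hist[prio] == 1)        /* category became occupied */
            pd->mask |= 1u << prio;
    }

    static void dist_subtract(struct prio_dist_sketch *pd, int prio)
    {
        if (--pd->hist[prio] == 0)        /* category became empty */
            pd->mask &= ~(1u << prio);
    }

    /* Highest-priority occupied category: lowest set bit (0 is the highest
     * priority); only meaningful while mask is non-zero. */
    static int dist_highest(const struct prio_dist_sketch *pd)
    {
        return __builtin_ctz(pd->mask);
    }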
-
-/* Cast to the the machine pointer size, whose size could be < 4 or > 32
- * (someday :). */
-static struct core_entry cores[NUM_CORES] IBSS_ATTR;
-struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
-
-static const char main_thread_name[] = "main";
-#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
-extern uintptr_t stackbegin[];
-extern uintptr_t stackend[];
-#else
-extern uintptr_t *stackbegin;
-extern uintptr_t *stackend;
-#endif
-
-static inline void core_sleep(IF_COP_VOID(unsigned int core))
- __attribute__((always_inline));
-
-void check_tmo_threads(void)
- __attribute__((noinline));
-
-static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
- __attribute__((always_inline));
-
-static void add_to_list_tmo(struct thread_entry *thread)
- __attribute__((noinline));
-
-static void core_schedule_wakeup(struct thread_entry *thread)
- __attribute__((noinline));
-
-#if NUM_CORES > 1
-static inline void run_blocking_ops(
- unsigned int core, struct thread_entry *thread)
- __attribute__((always_inline));
-#endif
-
-static void thread_stkov(struct thread_entry *thread)
- __attribute__((noinline));
-
-static inline void store_context(void* addr)
- __attribute__((always_inline));
-
-static inline void load_context(const void* addr)
- __attribute__((always_inline));
-
-#if NUM_CORES > 1
-static void thread_final_exit_do(struct thread_entry *current)
- __attribute__((noinline)) NORETURN_ATTR USED_ATTR;
-#else
-static inline void thread_final_exit(struct thread_entry *current)
- __attribute__((always_inline)) NORETURN_ATTR;
-#endif
-
-void switch_thread(void)
- __attribute__((noinline));
+static FORCE_INLINE void core_sleep(IF_COP_VOID(unsigned int core));
+static FORCE_INLINE void store_context(void* addr);
+static FORCE_INLINE void load_context(const void* addr);
/****************************************************************************
* Processor/OS-specific section - include necessary core support
*/
-
#include "asm/thread.c"
#if defined (CPU_PP)
@@ -193,20 +186,17 @@ void switch_thread(void)
* End Processor-specific section
***************************************************************************/
-static NO_INLINE
+static NO_INLINE NORETURN_ATTR
void thread_panicf(const char *msg, struct thread_entry *thread)
{
IF_COP( const unsigned int core = thread->core; )
- static char namebuf[sizeof (((struct thread_debug_info *)0)->name)];
- const char *name = thread->name;
- if (!name)
- name = "";
- snprintf(namebuf, sizeof (namebuf), *name ? "%s" : "%s%08lX",
- name, (unsigned long)thread->id);
+ static char name[sizeof (((struct thread_debug_info *)0)->name)];
+ format_thread_name(name, sizeof (name), thread);
panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
+ while (1);
}
-static void thread_stkov(struct thread_entry *thread)
+static NO_INLINE void thread_stkov(struct thread_entry *thread)
{
thread_panicf("Stkov", thread);
}
@@ -218,36 +208,51 @@ static void thread_stkov(struct thread_entry *thread)
({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
#define THREAD_PANICF(msg, thread) \
- do {} while (0)
+ do {} while (1)
#define THREAD_ASSERT(exp, msg, thread) \
do {} while (0)
#endif /* THREAD_EXTRA_CHECKS */
+/* Thread locking */
+#if NUM_CORES > 1
+#define LOCK_THREAD(thread) \
+ ({ corelock_lock(&(thread)->slot_cl); })
+#define TRY_LOCK_THREAD(thread) \
+ ({ corelock_try_lock(&(thread)->slot_cl); })
+#define UNLOCK_THREAD(thread) \
+ ({ corelock_unlock(&(thread)->slot_cl); })
+#else /* NUM_CORES == 1*/
+#define LOCK_THREAD(thread) \
+ ({ (void)(thread); })
+#define TRY_LOCK_THREAD(thread) \
+ ({ (void)(thread); })
+#define UNLOCK_THREAD(thread) \
+ ({ (void)(thread); })
+#endif /* NUM_CORES */
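As a usage sketch (hypothetical helper, not from this file): the slot lock serializes thread state changes between cores and compiles away entirely on single-core builds.

    static void example_set_state(struct thread_entry *t, unsigned int state)
    {
        LOCK_THREAD(t);    /* corelock_lock(&t->slot_cl) when NUM_CORES > 1 */
        t->state = state;
        UNLOCK_THREAD(t);  /* no-op on single-core builds */
    }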
+
/* RTR list */
-#define RTR_LOCK(core) \
- ({ corelock_lock(&cores[core].rtr_cl); })
-#define RTR_UNLOCK(core) \
- ({ corelock_unlock(&cores[core].rtr_cl); })
+#define RTR_LOCK(corep) \
+ corelock_lock(&(corep)->rtr_cl)
+#define RTR_UNLOCK(corep) \
+ corelock_unlock(&(corep)->rtr_cl)
#ifdef HAVE_PRIORITY_SCHEDULING
-#define rtr_add_entry(core, priority) \
- prio_add_entry(&cores[core].rtr, (priority))
-
-#define rtr_subtract_entry(core, priority) \
- prio_subtract_entry(&cores[core].rtr, (priority))
-
-#define rtr_move_entry(core, from, to) \
- prio_move_entry(&cores[core].rtr, (from), (to))
-#else
-#define rtr_add_entry(core, priority)
-#define rtr_add_entry_inl(core, priority)
-#define rtr_subtract_entry(core, priority)
-#define rtr_subtract_entry_inl(core, priotity)
-#define rtr_move_entry(core, from, to)
-#define rtr_move_entry_inl(core, from, to)
-#endif
+#define rtr_add_entry(corep, priority) \
+ prio_add_entry(&(corep)->rtr_dist, (priority))
+#define rtr_subtract_entry(corep, priority) \
+ prio_subtract_entry(&(corep)->rtr_dist, (priority))
+#define rtr_move_entry(corep, from, to) \
+ prio_move_entry(&(corep)->rtr_dist, (from), (to))
+#else /* !HAVE_PRIORITY_SCHEDULING */
+#define rtr_add_entry(corep, priority) \
+ do {} while (0)
+#define rtr_subtract_entry(corep, priority) \
+ do {} while (0)
+#define rtr_move_entry(corep, from, to) \
+ do {} while (0)
+#endif /* HAVE_PRIORITY_SCHEDULING */
-static inline void thread_store_context(struct thread_entry *thread)
+static FORCE_INLINE void thread_store_context(struct thread_entry *thread)
{
#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
thread->__errno = errno;
@@ -255,7 +260,7 @@ static inline void thread_store_context(struct thread_entry *thread)
store_context(&thread->context);
}
-static inline void thread_load_context(struct thread_entry *thread)
+static FORCE_INLINE void thread_load_context(struct thread_entry *thread)
{
load_context(&thread->context);
#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
@@ -263,272 +268,31 @@ static inline void thread_load_context(struct thread_entry *thread)
#endif
}
-static inline unsigned int should_switch_tasks(void)
+static FORCE_INLINE unsigned int
+should_switch_tasks(struct thread_entry *thread)
{
- unsigned int result = THREAD_OK;
-
-#ifdef HAVE_PRIORITY_SCHEDULING
- struct thread_entry *current = cores[CURRENT_CORE].running;
- if (current &&
- priobit_ffs(&cores[IF_COP_CORE(current->core)].rtr.mask)
- < current->priority)
- {
- /* There is a thread ready to run of higher priority on the same
- * core as the current one; recommend a task switch. */
- result |= THREAD_SWITCH;
- }
-#endif /* HAVE_PRIORITY_SCHEDULING */
-
- return result;
-}
-
#ifdef HAVE_PRIORITY_SCHEDULING
-/*---------------------------------------------------------------------------
- * Locks the thread registered as the owner of the block and makes sure it
- * didn't change in the meantime
- *---------------------------------------------------------------------------
- */
-#if NUM_CORES == 1
-static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
-{
- return bl->thread;
-}
-#else /* NUM_CORES > 1 */
-static struct thread_entry * lock_blocker_thread(struct blocker *bl)
-{
- /* The blocker thread may change during the process of trying to
- capture it */
- while (1)
- {
- struct thread_entry *t = bl->thread;
-
- /* TRY, or else deadlocks are possible */
- if (!t)
- {
- struct blocker_splay *blsplay = (struct blocker_splay *)bl;
- if (corelock_try_lock(&blsplay->cl))
- {
- if (!bl->thread)
- return NULL; /* Still multi */
-
- corelock_unlock(&blsplay->cl);
- }
- }
- else
- {
- if (TRY_LOCK_THREAD(t))
- {
- if (bl->thread == t)
- return t;
-
- UNLOCK_THREAD(t);
- }
- }
- }
-}
-#endif /* NUM_CORES */
-
-static inline void unlock_blocker_thread(struct blocker *bl)
-{
+ const unsigned int core = CURRENT_CORE;
#if NUM_CORES > 1
- struct thread_entry *blt = bl->thread;
- if (blt)
- UNLOCK_THREAD(blt);
- else
- corelock_unlock(&((struct blocker_splay *)bl)->cl);
-#endif /* NUM_CORES > 1*/
- (void)bl;
-}
+ /* Forget about it if different CPU */
+ if (thread->core != core)
+ return THREAD_OK;
+#endif
+ /* Just woke something therefore a thread is on the run queue */
+ struct thread_entry *current =
+ RTR_THREAD_FIRST(&__core_id_entry(core)->rtr);
+ if (LIKELY(thread->priority >= current->priority))
+ return THREAD_OK;
+
+ /* There is a thread ready to run of higher priority on the same
+ * core as the current one; recommend a task switch. */
+ return THREAD_OK | THREAD_SWITCH;
+#else
+ return THREAD_OK;
#endif /* HAVE_PRIORITY_SCHEDULING */
-
-/*---------------------------------------------------------------------------
- * Thread list structure - circular:
- * +------------------------------+
- * | |
- * +--+---+<-+---+<-+---+<-+---+<-+
- * Head->| T | | T | | T | | T |
- * +->+---+->+---+->+---+->+---+--+
- * | |
- * +------------------------------+
- *---------------------------------------------------------------------------
- */
-
-/*---------------------------------------------------------------------------
- * Adds a thread to a list of threads using "insert last". Uses the "l"
- * links.
- *---------------------------------------------------------------------------
- */
-static void add_to_list_l(struct thread_entry **list,
- struct thread_entry *thread)
-{
- struct thread_entry *l = *list;
-
- if (l == NULL)
- {
- /* Insert into unoccupied list */
- thread->l.prev = thread;
- thread->l.next = thread;
- *list = thread;
- return;
- }
-
- /* Insert last */
- thread->l.prev = l->l.prev;
- thread->l.next = l;
- l->l.prev->l.next = thread;
- l->l.prev = thread;
-}
-
-/*---------------------------------------------------------------------------
- * Removes a thread from a list of threads. Uses the "l" links.
- *---------------------------------------------------------------------------
- */
-static void remove_from_list_l(struct thread_entry **list,
- struct thread_entry *thread)
-{
- struct thread_entry *prev, *next;
-
- next = thread->l.next;
-
- if (thread == next)
- {
- /* The only item */
- *list = NULL;
- return;
- }
-
- if (thread == *list)
- {
- /* List becomes next item */
- *list = next;
- }
-
- prev = thread->l.prev;
-
- /* Fix links to jump over the removed entry. */
- next->l.prev = prev;
- prev->l.next = next;
-}
-
-/*---------------------------------------------------------------------------
- * Timeout list structure - circular reverse (to make "remove item" O(1)),
- * NULL-terminated forward (to ease the far more common forward traversal):
- * +------------------------------+
- * | |
- * +--+---+<-+---+<-+---+<-+---+<-+
- * Head->| T | | T | | T | | T |
- * +---+->+---+->+---+->+---+-X
- *---------------------------------------------------------------------------
- */
-
-/*---------------------------------------------------------------------------
- * Add a thread from the core's timout list by linking the pointers in its
- * tmo structure.
- *---------------------------------------------------------------------------
- */
-static void add_to_list_tmo(struct thread_entry *thread)
-{
- struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
- THREAD_ASSERT(thread->tmo.prev == NULL,
- "add_to_list_tmo->already listed", thread);
-
- thread->tmo.next = NULL;
-
- if (tmo == NULL)
- {
- /* Insert into unoccupied list */
- thread->tmo.prev = thread;
- cores[IF_COP_CORE(thread->core)].timeout = thread;
- return;
- }
-
- /* Insert Last */
- thread->tmo.prev = tmo->tmo.prev;
- tmo->tmo.prev->tmo.next = thread;
- tmo->tmo.prev = thread;
-}
-
-/*---------------------------------------------------------------------------
- * Remove a thread from the core's timout list by unlinking the pointers in
- * its tmo structure. Sets thread->tmo.prev to NULL to indicate the timeout
- * is cancelled.
- *---------------------------------------------------------------------------
- */
-static void remove_from_list_tmo(struct thread_entry *thread)
-{
- struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
- struct thread_entry *prev = thread->tmo.prev;
- struct thread_entry *next = thread->tmo.next;
-
- THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
-
- if (next != NULL)
- next->tmo.prev = prev;
-
- if (thread == *list)
- {
- /* List becomes next item and empty if next == NULL */
- *list = next;
- /* Mark as unlisted */
- thread->tmo.prev = NULL;
- }
- else
- {
- if (next == NULL)
- (*list)->tmo.prev = prev;
- prev->tmo.next = next;
- /* Mark as unlisted */
- thread->tmo.prev = NULL;
- }
}
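For context, a hypothetical call site showing how the THREAD_SWITCH hint is typically consumed: a kernel object wakes a waiter via wakeup_thread_() (below) and yields if a higher-priority thread just became runnable. WAKEUP_DEFAULT is assumed to be the plain wakeup protocol constant.

    static void example_wake_one(struct thread_entry *thread)
    {
        unsigned int result = wakeup_thread_(thread IF_PRIO(, WAKEUP_DEFAULT));
        if (result & THREAD_SWITCH)
            switch_thread();   /* scheduler recommends giving up the CPU */
    }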
#ifdef HAVE_PRIORITY_SCHEDULING
-/*---------------------------------------------------------------------------
- * Priority distribution structure (one category for each possible priority):
- *
- * +----+----+----+ ... +-----+
- * hist: | F0 | F1 | F2 | | F31 |
- * +----+----+----+ ... +-----+
- * mask: | b0 | b1 | b2 | | b31 |
- * +----+----+----+ ... +-----+
- *
- * F = count of threads at priority category n (frequency)
- * b = bitmask of non-zero priority categories (occupancy)
- *
- * / if H[n] != 0 : 1
- * b[n] = |
- * \ else : 0
- *
- *---------------------------------------------------------------------------
- * Basic priority inheritance priotocol (PIP):
- *
- * Mn = mutex n, Tn = thread n
- *
- * A lower priority thread inherits the priority of the highest priority
- * thread blocked waiting for it to complete an action (such as release a
- * mutex or respond to a message via queue_send):
- *
- * 1) T2->M1->T1
- *
- * T1 owns M1, T2 is waiting for M1 to realease M1. If T2 has a higher
- * priority than T1 then T1 inherits the priority of T2.
- *
- * 2) T3
- * \/
- * T2->M1->T1
- *
- * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
- * T1 inherits the higher of T2 and T3.
- *
- * 3) T3->M2->T2->M1->T1
- *
- * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
- * then T1 inherits the priority of T3 through T2.
- *
- * Blocking chains can grow arbitrarily complex (though it's best that they
- * not form at all very often :) and build-up from these units.
- *---------------------------------------------------------------------------
- */
/*---------------------------------------------------------------------------
* Increment frequency at category "priority"
@@ -569,25 +333,86 @@ static inline void prio_move_entry(
if (++pd->hist[to] == 1)
priobit_set_bit(&pd->mask, to);
}
+
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
- * Move a thread back to a running state on its core.
+ * Common init for new thread basic info
*---------------------------------------------------------------------------
*/
-static void core_schedule_wakeup(struct thread_entry *thread)
+static void new_thread_base_init(struct thread_entry *thread,
+ void **stackp, size_t *stack_sizep,
+ const char *name IF_PRIO(, int priority)
+ IF_COP(, unsigned int core))
{
- const unsigned int core = IF_COP_CORE(thread->core);
+ ALIGN_BUFFER(*stackp, *stack_sizep, MIN_STACK_ALIGN);
+ thread->stack = *stackp;
+ thread->stack_size = *stack_sizep;
- RTR_LOCK(core);
+ thread->name = name;
+ wait_queue_init(&thread->queue);
+ thread->wqp = NULL;
+ tmo_set_dequeued(thread);
+#ifdef HAVE_PRIORITY_SCHEDULING
+ thread->skip_count = 0;
+ thread->blocker = NULL;
+ thread->base_priority = priority;
+ thread->priority = priority;
+ memset(&thread->pdist, 0, sizeof(thread->pdist));
+ prio_add_entry(&thread->pdist, priority);
+#endif
+#if NUM_CORES > 1
+ thread->core = core;
+#endif
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ thread->cpu_boost = 0;
+#endif
+#ifdef HAVE_IO_PRIORITY
+ /* Default to high (foreground) priority */
+ thread->io_priority = IO_PRIORITY_IMMEDIATE;
+#endif
+}
+/*---------------------------------------------------------------------------
+ * Move a thread onto the core's run queue and promote it
+ *---------------------------------------------------------------------------
+ */
+static inline void core_rtr_add(struct core_entry *corep,
+ struct thread_entry *thread)
+{
+ RTR_LOCK(corep);
+ rtr_queue_add(&corep->rtr, thread);
+ rtr_add_entry(corep, thread->priority);
+#ifdef HAVE_PRIORITY_SCHEDULING
+ thread->skip_count = thread->base_priority;
+#endif
thread->state = STATE_RUNNING;
+ RTR_UNLOCK(corep);
+}
- add_to_list_l(&cores[core].running, thread);
- rtr_add_entry(core, thread->priority);
-
- RTR_UNLOCK(core);
+/*---------------------------------------------------------------------------
+ * Remove a thread from the core's run queue
+ *---------------------------------------------------------------------------
+ */
+static inline void core_rtr_remove(struct core_entry *corep,
+ struct thread_entry *thread)
+{
+ RTR_LOCK(corep);
+ rtr_queue_remove(&corep->rtr, thread);
+ rtr_subtract_entry(corep, thread->priority);
+ /* Does not demote state */
+ RTR_UNLOCK(corep);
+}
+/*---------------------------------------------------------------------------
+ * Move a thread back to a running state on its core
+ *---------------------------------------------------------------------------
+ */
+static NO_INLINE void core_schedule_wakeup(struct thread_entry *thread)
+{
+ const unsigned int core = IF_COP_CORE(thread->core);
+ struct core_entry *corep = __core_id_entry(core);
+ core_rtr_add(corep, thread);
#if NUM_CORES > 1
if (core != CURRENT_CORE)
core_wake(core);
@@ -596,17 +421,75 @@ static void core_schedule_wakeup(struct thread_entry *thread)
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
+ * Locks the thread registered as the owner of the block and makes sure it
+ * didn't change in the meantime
+ *---------------------------------------------------------------------------
+ */
+#if NUM_CORES == 1
+static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
+{
+ return bl->thread;
+}
+#else /* NUM_CORES > 1 */
+static struct thread_entry * lock_blocker_thread(struct blocker *bl)
+{
+ /* The blocker thread may change during the process of trying to
+ capture it */
+ while (1)
+ {
+ struct thread_entry *t = bl->thread;
+
+ /* TRY, or else deadlocks are possible */
+ if (!t)
+ {
+ struct blocker_splay *blsplay = (struct blocker_splay *)bl;
+ if (corelock_try_lock(&blsplay->cl))
+ {
+ if (!bl->thread)
+ return NULL; /* Still multi */
+
+ corelock_unlock(&blsplay->cl);
+ }
+ }
+ else
+ {
+ if (TRY_LOCK_THREAD(t))
+ {
+ if (bl->thread == t)
+ return t;
+
+ UNLOCK_THREAD(t);
+ }
+ }
+ }
+}
+#endif /* NUM_CORES */
+
+static inline void unlock_blocker_thread(struct blocker *bl)
+{
+#if NUM_CORES > 1
+ struct thread_entry *blt = bl->thread;
+ if (blt)
+ UNLOCK_THREAD(blt);
+ else
+ corelock_unlock(&((struct blocker_splay *)bl)->cl);
+#endif /* NUM_CORES > 1*/
+ (void)bl;
+}
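Reduced to a hedged, standalone sketch (hypothetical name; the multi-owner splay branch is omitted), the retry above boils down to: never block on the slot lock while the owner may still change, and re-check the owner after acquiring it.

    static struct thread_entry * example_capture_owner(struct blocker *bl)
    {
        for (;;)
        {
            struct thread_entry *t = bl->thread;
            if (t == NULL)
                return NULL;            /* no single owner to capture      */
            if (TRY_LOCK_THREAD(t))     /* try-lock only: avoids deadlock  */
            {
                if (bl->thread == t)
                    return t;           /* owner unchanged while locking   */
                UNLOCK_THREAD(t);       /* owner changed underneath: retry */
            }
        }
    }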
+
+/*---------------------------------------------------------------------------
* Change the priority and rtr entry for a running thread
*---------------------------------------------------------------------------
*/
-static inline void set_running_thread_priority(
+static inline void set_rtr_thread_priority(
struct thread_entry *thread, int priority)
{
const unsigned int core = IF_COP_CORE(thread->core);
- RTR_LOCK(core);
- rtr_move_entry(core, thread->priority, priority);
+ struct core_entry *corep = __core_id_entry(core);
+ RTR_LOCK(corep);
+ rtr_move_entry(corep, thread->priority, priority);
thread->priority = priority;
- RTR_UNLOCK(core);
+ RTR_UNLOCK(corep);
}
/*---------------------------------------------------------------------------
@@ -619,30 +502,21 @@ static inline void set_running_thread_priority(
* penalty under high contention.
*---------------------------------------------------------------------------
*/
-static int find_highest_priority_in_list_l(
- struct thread_entry * const thread)
+static int wait_queue_find_priority(struct __wait_queue *wqp)
{
- if (LIKELY(thread != NULL))
- {
- /* Go though list until the ending up at the initial thread */
- int highest_priority = thread->priority;
- struct thread_entry *curr = thread;
+ int highest_priority = PRIORITY_IDLE;
+ struct thread_entry *thread = WQ_THREAD_FIRST(wqp);
- do
- {
- int priority = curr->priority;
-
- if (priority < highest_priority)
- highest_priority = priority;
-
- curr = curr->l.next;
- }
- while (curr != thread);
+ while (thread != NULL)
+ {
+ int priority = thread->priority;
+ if (priority < highest_priority)
+ highest_priority = priority;
- return highest_priority;
+ thread = WQ_THREAD_NEXT(thread);
}
- return PRIORITY_IDLE;
+ return highest_priority;
}
/*---------------------------------------------------------------------------
@@ -666,7 +540,7 @@ static void inherit_priority(
{
/* Multiple owners */
struct blocker_splay *blsplay = (struct blocker_splay *)bl;
-
+
/* Recurse down the all the branches of this; it's the only way.
We might meet the same queue several times if more than one of
these threads is waiting the same queue. That isn't a problem
@@ -674,7 +548,7 @@ static void inherit_priority(
FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
{
bl->priority = oldblpr; /* To see the change each time */
- blt = &threads[slotnum];
+ blt = __thread_slot_entry(slotnum);
LOCK_THREAD(blt);
inherit_priority(blocker0, bl, blt, newblpr);
}
@@ -699,7 +573,7 @@ static void inherit_priority(
if (blt->state == STATE_RUNNING)
{
- set_running_thread_priority(blt, newpr);
+ set_rtr_thread_priority(blt, newpr);
break; /* Running: last in chain */
}
@@ -714,7 +588,7 @@ static void inherit_priority(
break; /* Full circle - deadlock! */
/* Blocker becomes current thread and the process repeats */
- struct thread_entry **bqp = blt->bqp;
+ struct __wait_queue *wqp = wait_queue_ptr(blt);
struct thread_entry *t = blt;
blt = lock_blocker_thread(bl);
@@ -725,7 +599,7 @@ static void inherit_priority(
if (newpr <= oldblpr)
newblpr = newpr;
else if (oldpr <= oldblpr)
- newblpr = find_highest_priority_in_list_l(*bqp);
+ newblpr = wait_queue_find_priority(wqp);
if (newblpr == oldblpr)
break; /* Queue priority not changing */
@@ -735,22 +609,46 @@ static void inherit_priority(
}
/*---------------------------------------------------------------------------
- * Quick-disinherit of priority elevation. 'thread' must be a running thread.
+ * Quick-inherit of priority elevation. 'thread' must not be runnable.
*---------------------------------------------------------------------------
*/
-static void priority_disinherit_internal(struct thread_entry *thread,
- int blpr)
+static void priority_inherit_internal_inner(struct thread_entry *thread,
+ int blpr)
+{
+ if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < thread->priority)
+ thread->priority = blpr;
+}
+
+static inline void priority_inherit_internal(struct thread_entry *thread,
+ int blpr)
{
- if (blpr < PRIORITY_IDLE &&
- prio_subtract_entry(&thread->pdist, blpr) == 0 &&
+ if (blpr < PRIORITY_IDLE)
+ priority_inherit_internal_inner(thread, blpr);
+}
+
+/*---------------------------------------------------------------------------
+ * Quick-disinherit of priority elevation. 'thread' must be the current thread.
+ *---------------------------------------------------------------------------
+ */
+static void priority_disinherit_internal_inner(struct thread_entry *thread,
+ int blpr)
+{
+ if (prio_subtract_entry(&thread->pdist, blpr) == 0 &&
blpr <= thread->priority)
{
int priority = priobit_ffs(&thread->pdist.mask);
if (priority != thread->priority)
- set_running_thread_priority(thread, priority);
+ set_rtr_thread_priority(thread, priority);
}
}
+static inline void priority_disinherit_internal(struct thread_entry *thread,
+ int blpr)
+{
+ if (blpr < PRIORITY_IDLE)
+ priority_disinherit_internal_inner(thread, blpr);
+}
+
void priority_disinherit(struct thread_entry *thread, struct blocker *bl)
{
LOCK_THREAD(thread);
@@ -767,30 +665,32 @@ static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
{
/* All threads will have the same blocker and queue; only we are changing
it now */
- struct thread_entry **bqp = thread->bqp;
- struct blocker_splay *blsplay = (struct blocker_splay *)thread->blocker;
- struct thread_entry *blt = blsplay->blocker.thread;
+ struct __wait_queue *wqp = wait_queue_ptr(thread);
+ struct blocker *bl = thread->blocker;
+ struct blocker_splay *blsplay = (struct blocker_splay *)bl;
+ struct thread_entry *blt = bl->thread;
/* The first thread is already locked and is assumed tagged "multi" */
int count = 1;
- struct thread_entry *temp_queue = NULL;
- /* 'thread' is locked on entry */
+ /* Multiple versions of the wait queue may be seen if doing more than
+ one thread; queue removal isn't destructive to the pointers of the node
+ being removed; this may lead to the blocker priority being wrong for a
+ time but it gets fixed up below after getting exclusive access to the
+ queue */
while (1)
{
- LOCK_THREAD(blt);
-
- remove_from_list_l(bqp, thread);
thread->blocker = NULL;
+ wait_queue_remove(thread);
- struct thread_entry *tnext = *bqp;
+ unsigned int slotnum = THREAD_ID_SLOT(thread->id);
+ threadbit_set_bit(&blsplay->mask, slotnum);
+
+ struct thread_entry *tnext = WQ_THREAD_NEXT(thread);
if (tnext == NULL || tnext->retval == 0)
break;
- add_to_list_l(&temp_queue, thread);
-
UNLOCK_THREAD(thread);
- UNLOCK_THREAD(blt);
count++;
thread = tnext;
@@ -798,65 +698,51 @@ static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
LOCK_THREAD(thread);
}
- int blpr = blsplay->blocker.priority;
- priority_disinherit_internal(blt, blpr);
-
/* Locking order reverses here since the threads are no longer on the
- queue side */
+ queued side */
if (count > 1)
- {
- add_to_list_l(&temp_queue, thread);
- UNLOCK_THREAD(thread);
corelock_lock(&blsplay->cl);
- blpr = find_highest_priority_in_list_l(*bqp);
+ LOCK_THREAD(blt);
+
+ int blpr = bl->priority;
+ priority_disinherit_internal(blt, blpr);
+
+ if (count > 1)
+ {
blsplay->blocker.thread = NULL;
- thread = temp_queue;
- LOCK_THREAD(thread);
+ blpr = wait_queue_find_priority(wqp);
+
+ FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
+ {
+ UNLOCK_THREAD(thread);
+ thread = __thread_slot_entry(slotnum);
+ LOCK_THREAD(thread);
+ priority_inherit_internal(thread, blpr);
+ core_schedule_wakeup(thread);
+ }
}
else
{
/* Becomes a simple, direct transfer */
- if (thread->priority <= blpr)
- blpr = find_highest_priority_in_list_l(*bqp);
blsplay->blocker.thread = thread;
- }
-
- blsplay->blocker.priority = blpr;
- while (1)
- {
- unsigned int slotnum = THREAD_ID_SLOT(thread->id);
- threadbit_set_bit(&blsplay->mask, slotnum);
-
- if (blpr < PRIORITY_IDLE)
- {
- prio_add_entry(&thread->pdist, blpr);
- if (blpr < thread->priority)
- thread->priority = blpr;
- }
-
- if (count > 1)
- remove_from_list_l(&temp_queue, thread);
+ if (thread->priority <= blpr)
+ blpr = wait_queue_find_priority(wqp);
+ priority_inherit_internal(thread, blpr);
core_schedule_wakeup(thread);
+ }
- UNLOCK_THREAD(thread);
-
- thread = temp_queue;
- if (thread == NULL)
- break;
+ UNLOCK_THREAD(thread);
- LOCK_THREAD(thread);
- }
+ bl->priority = blpr;
UNLOCK_THREAD(blt);
if (count > 1)
- {
corelock_unlock(&blsplay->cl);
- }
blt->retval = count;
}
@@ -876,29 +762,20 @@ static void wakeup_thread_transfer(struct thread_entry *thread)
struct blocker *bl = thread->blocker;
struct thread_entry *blt = bl->thread;
- THREAD_ASSERT(cores[CURRENT_CORE].running == blt,
- "UPPT->wrong thread", cores[CURRENT_CORE].running);
+ THREAD_ASSERT(__running_self_entry() == blt,
+ "UPPT->wrong thread", __running_self_entry());
LOCK_THREAD(blt);
- struct thread_entry **bqp = thread->bqp;
- remove_from_list_l(bqp, thread);
thread->blocker = NULL;
+ struct __wait_queue *wqp = wait_queue_remove(thread);
int blpr = bl->priority;
/* Remove the object's boost from the owning thread */
- if (prio_subtract_entry(&blt->pdist, blpr) == 0 && blpr <= blt->priority)
- {
- /* No more threads at this priority are waiting and the old level is
- * at least the thread level */
- int priority = priobit_ffs(&blt->pdist.mask);
- if (priority != blt->priority)
- set_running_thread_priority(blt, priority);
- }
-
- struct thread_entry *tnext = *bqp;
+ priority_disinherit_internal_inner(blt, blpr);
+ struct thread_entry *tnext = WQ_THREAD_FIRST(wqp);
if (LIKELY(tnext == NULL))
{
/* Expected shortcut - no more waiters */
@@ -906,20 +783,20 @@ static void wakeup_thread_transfer(struct thread_entry *thread)
}
else
{
- /* If lowering, we need to scan threads remaining in queue */
- int priority = thread->priority;
- if (priority <= blpr)
- blpr = find_highest_priority_in_list_l(tnext);
+ /* If thread is at the blocker priority, its removal may drop it */
+ if (thread->priority <= blpr)
+ blpr = wait_queue_find_priority(wqp);
- if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < priority)
- thread->priority = blpr; /* Raise new owner */
+ priority_inherit_internal_inner(thread, blpr);
}
+ bl->thread = thread; /* This thread pwns */
+
core_schedule_wakeup(thread);
UNLOCK_THREAD(thread);
- bl->thread = thread; /* This thread pwns */
- bl->priority = blpr; /* Save highest blocked priority */
+ bl->priority = blpr; /* Save highest blocked priority */
+
UNLOCK_THREAD(blt);
}
@@ -933,9 +810,9 @@ static void wakeup_thread_release(struct thread_entry *thread)
{
struct blocker *bl = thread->blocker;
struct thread_entry *blt = lock_blocker_thread(bl);
- struct thread_entry **bqp = thread->bqp;
- remove_from_list_l(bqp, thread);
+
thread->blocker = NULL;
+ struct __wait_queue *wqp = wait_queue_remove(thread);
/* Off to see the wizard... */
core_schedule_wakeup(thread);
@@ -950,7 +827,7 @@ static void wakeup_thread_release(struct thread_entry *thread)
UNLOCK_THREAD(thread);
- int newblpr = find_highest_priority_in_list_l(*bqp);
+ int newblpr = wait_queue_find_priority(wqp);
if (newblpr == bl->priority)
{
/* Blocker priority won't change */
@@ -963,25 +840,17 @@ static void wakeup_thread_release(struct thread_entry *thread)
#endif /* HAVE_PRIORITY_SCHEDULING */
+
/*---------------------------------------------------------------------------
 * Explicitly wake up a thread on a blocking queue. Only affects threads of
* STATE_BLOCKED and STATE_BLOCKED_W_TMO.
*
- * This code should be considered a critical section by the caller meaning
- * that the object's corelock should be held.
- *
- * INTERNAL: Intended for use by kernel objects and not for programs.
+ * INTERNAL: Intended for use by kernel and not programs.
*---------------------------------------------------------------------------
*/
-unsigned int wakeup_thread_(struct thread_entry **list
+unsigned int wakeup_thread_(struct thread_entry *thread
IF_PRIO(, enum wakeup_thread_protocol proto))
{
- struct thread_entry *thread = *list;
-
- /* Check if there is a blocked thread at all. */
- if (*list == NULL)
- return THREAD_NONE;
-
LOCK_THREAD(thread);
/* Determine thread's current state. */
@@ -1008,24 +877,21 @@ unsigned int wakeup_thread_(struct thread_entry **list
else
#endif /* HAVE_PRIORITY_SCHEDULING */
{
- /* No PIP - just boost the thread by aging */
-#ifdef HAVE_PRIORITY_SCHEDULING
- thread->skip_count = thread->priority;
-#endif /* HAVE_PRIORITY_SCHEDULING */
- remove_from_list_l(list, thread);
+ wait_queue_remove(thread);
core_schedule_wakeup(thread);
UNLOCK_THREAD(thread);
}
- return should_switch_tasks();
+ return should_switch_tasks(thread);
- /* Nothing to do. State is not blocked. */
- default:
-#if THREAD_EXTRA_CHECKS
- THREAD_PANICF("wakeup_thread->block invalid", thread);
case STATE_RUNNING:
- case STATE_KILLED:
-#endif
+ if (wait_queue_try_remove(thread))
+ {
+ UNLOCK_THREAD(thread);
+ return THREAD_OK; /* timed out */
+ }
+
+ default:
UNLOCK_THREAD(thread);
return THREAD_NONE;
}
@@ -1037,201 +903,102 @@ unsigned int wakeup_thread_(struct thread_entry **list
* tick when the next check will occur.
*---------------------------------------------------------------------------
*/
-void check_tmo_threads(void)
+static NO_INLINE void check_tmo_expired_inner(struct core_entry *corep)
{
- const unsigned int core = CURRENT_CORE;
const long tick = current_tick; /* snapshot the current tick */
long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
- struct thread_entry *next = cores[core].timeout;
+ struct thread_entry *prev = NULL;
+ struct thread_entry *thread = TMO_THREAD_FIRST(&corep->tmo);
/* If there are no processes waiting for a timeout, just keep the check
tick from falling into the past. */
/* Break the loop once we have walked through the list of all
* sleeping processes or have removed them all. */
- while (next != NULL)
+ while (thread != NULL)
{
/* Check sleeping threads. Allow interrupts between checks. */
enable_irq();
- struct thread_entry *curr = next;
-
- next = curr->tmo.next;
+ struct thread_entry *next = TMO_THREAD_NEXT(thread);
/* Lock thread slot against explicit wakeup */
disable_irq();
- LOCK_THREAD(curr);
+ LOCK_THREAD(thread);
- unsigned state = curr->state;
+ unsigned int state = thread->state;
- if (state < TIMEOUT_STATE_FIRST)
- {
- /* Cleanup threads no longer on a timeout but still on the
- * list. */
- remove_from_list_tmo(curr);
- }
- else if (LIKELY(TIME_BEFORE(tick, curr->tmo_tick)))
+ if (LIKELY(state >= TIMEOUT_STATE_FIRST &&
+ TIME_BEFORE(tick, thread->tmo_tick)))
{
/* Timeout still pending - this will be the usual case */
- if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
+ if (TIME_BEFORE(thread->tmo_tick, next_tmo_check))
{
- /* Earliest timeout found so far - move the next check up
- to its time */
- next_tmo_check = curr->tmo_tick;
+ /* Move the next check up to its time */
+ next_tmo_check = thread->tmo_tick;
}
+
+ prev = thread;
}
else
{
- /* Sleep timeout has been reached so bring the thread back to
- * life again. */
- if (state == STATE_BLOCKED_W_TMO)
- {
-#ifdef HAVE_CORELOCK_OBJECT
- /* Lock the waiting thread's kernel object */
- struct corelock *ocl = curr->obj_cl;
-
- if (UNLIKELY(corelock_try_lock(ocl) == 0))
- {
- /* Need to retry in the correct order though the need is
- * unlikely */
- UNLOCK_THREAD(curr);
- corelock_lock(ocl);
- LOCK_THREAD(curr);
-
- if (UNLIKELY(curr->state != STATE_BLOCKED_W_TMO))
- {
- /* Thread was woken or removed explicitely while slot
- * was unlocked */
- corelock_unlock(ocl);
- remove_from_list_tmo(curr);
- UNLOCK_THREAD(curr);
- continue;
- }
- }
-#endif /* NUM_CORES */
-
-#ifdef HAVE_WAKEUP_EXT_CB
- if (curr->wakeup_ext_cb != NULL)
- curr->wakeup_ext_cb(curr);
-#endif
-
-#ifdef HAVE_PRIORITY_SCHEDULING
- if (curr->blocker != NULL)
- wakeup_thread_release(curr);
- else
-#endif
- remove_from_list_l(curr->bqp, curr);
-
- corelock_unlock(ocl);
- }
- /* else state == STATE_SLEEPING */
+ /* TODO: there are no priority-inheriting timeout blocks
+ right now but the procedure should be established */
- remove_from_list_tmo(curr);
+ /* Sleep timeout has been reached / garbage collect stale list
+ items */
+ tmo_queue_expire(&corep->tmo, prev, thread);
- RTR_LOCK(core);
+ if (state >= TIMEOUT_STATE_FIRST)
+ core_rtr_add(corep, thread);
- curr->state = STATE_RUNNING;
-
- add_to_list_l(&cores[core].running, curr);
- rtr_add_entry(core, curr->priority);
-
- RTR_UNLOCK(core);
+ /* removed this one - prev doesn't change */
}
- UNLOCK_THREAD(curr);
- }
-
- cores[core].next_tmo_check = next_tmo_check;
-}
-
-/*---------------------------------------------------------------------------
- * Performs operations that must be done before blocking a thread but after
- * the state is saved.
- *---------------------------------------------------------------------------
- */
-#if NUM_CORES > 1
-static inline void run_blocking_ops(
- unsigned int core, struct thread_entry *thread)
-{
- struct thread_blk_ops *ops = &cores[core].blk_ops;
- const unsigned flags = ops->flags;
-
- if (LIKELY(flags == TBOP_CLEAR))
- return;
+ UNLOCK_THREAD(thread);
- switch (flags)
- {
- case TBOP_SWITCH_CORE:
- core_switch_blk_op(core, thread);
- /* Fall-through */
- case TBOP_UNLOCK_CORELOCK:
- corelock_unlock(ops->cl_p);
- break;
+ thread = next;
}
- ops->flags = TBOP_CLEAR;
+ corep->next_tmo_check = next_tmo_check;
}
-#endif /* NUM_CORES > 1 */
-#ifdef RB_PROFILE
-void profile_thread(void)
+static FORCE_INLINE void check_tmo_expired(struct core_entry *corep)
{
- profstart(cores[CURRENT_CORE].running - threads);
+ if (!TIME_BEFORE(current_tick, corep->next_tmo_check))
+ check_tmo_expired_inner(corep);
}
-#endif
/*---------------------------------------------------------------------------
- * Prepares a thread to block on an object's list and/or for a specified
- * duration - expects object and slot to be appropriately locked if needed
- * and interrupts to be masked.
+ * Prepares the current thread to sleep forever or for the given duration.
*---------------------------------------------------------------------------
*/
-static inline void block_thread_on_l(struct thread_entry *thread,
- unsigned state)
+static FORCE_INLINE void prepare_block(struct thread_entry *current,
+ unsigned int state, int timeout)
{
- /* If inlined, unreachable branches will be pruned with no size penalty
- because state is passed as a constant parameter. */
- const unsigned int core = IF_COP_CORE(thread->core);
+ const unsigned int core = IF_COP_CORE(current->core);
/* Remove the thread from the list of running threads. */
- RTR_LOCK(core);
- remove_from_list_l(&cores[core].running, thread);
- rtr_subtract_entry(core, thread->priority);
- RTR_UNLOCK(core);
+ struct core_entry *corep = __core_id_entry(core);
+ core_rtr_remove(corep, current);
- /* Add a timeout to the block if not infinite */
- switch (state)
+ if (timeout >= 0)
{
- case STATE_BLOCKED:
- case STATE_BLOCKED_W_TMO:
- /* Put the thread into a new list of inactive threads. */
- add_to_list_l(thread->bqp, thread);
+ /* Sleep may expire. */
+ long tmo_tick = current_tick + timeout;
+ current->tmo_tick = tmo_tick;
- if (state == STATE_BLOCKED)
- break;
+ if (TIME_BEFORE(tmo_tick, corep->next_tmo_check))
+ corep->next_tmo_check = tmo_tick;
- /* Fall-through */
- case STATE_SLEEPING:
- /* If this thread times out sooner than any other thread, update
- next_tmo_check to its timeout */
- if (TIME_BEFORE(thread->tmo_tick, cores[core].next_tmo_check))
- {
- cores[core].next_tmo_check = thread->tmo_tick;
- }
+ tmo_queue_register(&corep->tmo, current);
- if (thread->tmo.prev == NULL)
- {
- add_to_list_tmo(thread);
- }
- /* else thread was never removed from list - just keep it there */
- break;
+ if (state == STATE_BLOCKED)
+ state = STATE_BLOCKED_W_TMO;
}
- /* Remember the the next thread about to block. */
- cores[core].block_task = thread;
-
/* Report new state. */
- thread->state = state;
+ current->state = state;
}
/*---------------------------------------------------------------------------
@@ -1239,178 +1006,120 @@ static inline void block_thread_on_l(struct thread_entry *thread,
* that removed itself from the running list first must specify itself in
 * the parameter.
*
- * INTERNAL: Intended for use by kernel and not for programs.
+ * INTERNAL: Intended for use by kernel and not programs.
*---------------------------------------------------------------------------
*/
void switch_thread(void)
{
-
const unsigned int core = CURRENT_CORE;
- struct thread_entry *block = cores[core].block_task;
- struct thread_entry *thread = cores[core].running;
+ struct core_entry *corep = __core_id_entry(core);
+ struct thread_entry *thread = corep->running;
- /* Get context to save - next thread to run is unknown until all wakeups
- * are evaluated */
- if (block != NULL)
+ if (thread)
{
- cores[core].block_task = NULL;
-
-#if NUM_CORES > 1
- if (UNLIKELY(thread == block))
- {
- /* This was the last thread running and another core woke us before
- * reaching here. Force next thread selection to give tmo threads or
- * other threads woken before this block a first chance. */
- block = NULL;
- }
- else
-#endif
- {
- /* Blocking task is the old one */
- thread = block;
- }
- }
-
#ifdef RB_PROFILE
-#ifdef CPU_COLDFIRE
- _profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
-#else
- profile_thread_stopped(thread->id & THREAD_ID_SLOT_MASK);
+ profile_thread_stopped(THREAD_ID_SLOT(thread->id));
#endif
-#endif
-
- /* Begin task switching by saving our current context so that we can
- * restore the state of the current thread later to the point prior
- * to this call. */
- thread_store_context(thread);
#ifdef DEBUG
- /* Check core_ctx buflib integrity */
- core_check_valid();
-#endif
-
- /* Check if the current thread stack is overflown */
- if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
- thread_stkov(thread);
-
-#if NUM_CORES > 1
- /* Run any blocking operations requested before switching/sleeping */
- run_blocking_ops(core, thread);
+ /* Check core_ctx buflib integrity */
+ core_check_valid();
#endif
+ thread_store_context(thread);
-#ifdef HAVE_PRIORITY_SCHEDULING
- /* Reset the value of thread's skip count */
- thread->skip_count = 0;
-#endif
+ /* Check if the current thread stack is overflown */
+ if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
+ thread_stkov(thread);
+ }
+ /* TODO: make a real idle task */
for (;;)
{
- /* If there are threads on a timeout and the earliest wakeup is due,
- * check the list and wake any threads that need to start running
- * again. */
- if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
- {
- check_tmo_threads();
- }
-
disable_irq();
- RTR_LOCK(core);
- thread = cores[core].running;
+ /* Check for expired timeouts */
+ check_tmo_expired(corep);
- if (UNLIKELY(thread == NULL))
- {
- /* Enter sleep mode to reduce power usage - woken up on interrupt
- * or wakeup request from another core - expected to enable
- * interrupts. */
- RTR_UNLOCK(core);
- core_sleep(IF_COP(core));
- }
- else
- {
-#ifdef HAVE_PRIORITY_SCHEDULING
- /* Select the new task based on priorities and the last time a
- * process got CPU time relative to the highest priority runnable
- * task. */
- int max = priobit_ffs(&cores[core].rtr.mask);
+ RTR_LOCK(corep);
- if (block == NULL)
- {
- /* Not switching on a block, tentatively select next thread */
- thread = thread->l.next;
- }
+ if (!RTR_EMPTY(&corep->rtr))
+ break;
- for (;;)
- {
- int priority = thread->priority;
- int diff;
-
- /* This ridiculously simple method of aging seems to work
- * suspiciously well. It does tend to reward CPU hogs (under
- * yielding) but that's generally not desirable at all. On
- * the plus side, it, relatively to other threads, penalizes
- * excess yielding which is good if some high priority thread
- * is performing no useful work such as polling for a device
- * to be ready. Of course, aging is only employed when higher
- * and lower priority threads are runnable. The highest
- * priority runnable thread(s) are never skipped unless a
- * lower-priority process has aged sufficiently. Priorities
- * of REALTIME class are run strictly according to priority
- * thus are not subject to switchout due to lower-priority
- * processes aging; they must give up the processor by going
- * off the run list. */
- if (LIKELY(priority <= max) ||
- (priority > PRIORITY_REALTIME &&
- (diff = priority - max,
- ++thread->skip_count > diff*diff)))
- {
- cores[core].running = thread;
- break;
- }
-
- thread = thread->l.next;
- }
-#else
- /* Without priority use a simple FCFS algorithm */
- if (block == NULL)
- {
- /* Not switching on a block, select next thread */
- thread = thread->l.next;
- cores[core].running = thread;
- }
-#endif /* HAVE_PRIORITY_SCHEDULING */
+ thread = NULL;
+
+ /* Enter sleep mode to reduce power usage */
+ RTR_UNLOCK(corep);
+ core_sleep(IF_COP(core));
+
+ /* Awakened by interrupt or other CPU */
+ }
+
+ thread = (thread && thread->state == STATE_RUNNING) ?
+ RTR_THREAD_NEXT(thread) : RTR_THREAD_FIRST(&corep->rtr);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Select the new task based on priorities and the last time a
+ * process got CPU time relative to the highest priority runnable
+ * task. If priority is not a feature, then FCFS is used (above). */
+ int max = priobit_ffs(&corep->rtr_dist.mask);
- RTR_UNLOCK(core);
- enable_irq();
+ for (;;)
+ {
+ int priority = thread->priority;
+ int diff;
+
+ /* This ridiculously simple method of aging seems to work
+ * suspiciously well. It does tend to reward CPU hogs (under
+ * yielding) but that's generally not desirable at all. On
+ * the plus side, it, relatively to other threads, penalizes
+ * excess yielding which is good if some high priority thread
+ * is performing no useful work such as polling for a device
+ * to be ready. Of course, aging is only employed when higher
+ * and lower priority threads are runnable. The highest
+ * priority runnable thread(s) are never skipped unless a
+ * lower-priority process has aged sufficiently. Priorities
+ * of REALTIME class are run strictly according to priority
+ * thus are not subject to switchout due to lower-priority
+ * processes aging; they must give up the processor by going
+ * off the run list. */
+ if (LIKELY(priority <= max) ||
+ (priority > PRIORITY_REALTIME &&
+ (diff = priority - max, ++thread->skip_count > diff*diff)))
+ {
break;
}
+
+ thread = RTR_THREAD_NEXT(thread);
}
- /* And finally give control to the next thread. */
+ thread->skip_count = 0; /* Reset aging counter */
+#endif /* HAVE_PRIORITY_SCHEDULING */
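Read in isolation, the aging rule above says a thread whose priority sits diff steps below the best runnable priority becomes eligible only after being passed over diff*diff times, and realtime-class priorities never age in. A hedged sketch of just that test (illustrative helper; PRIORITY_REALTIME and the skip counter come from the surrounding code):

    static int aging_allows_run(int priority, int max, int *skip_count)
    {
        if (priority <= max)                /* among the best runnable: yes  */
            return 1;
        if (priority <= PRIORITY_REALTIME)  /* realtime class: strict order  */
            return 0;
        int diff = priority - max;          /* e.g. diff == 2 -> eligible    */
        return ++*skip_count > diff * diff; /* after being skipped 4 times   */
    }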
+
+ rtr_queue_make_first(&corep->rtr, thread);
+ corep->running = thread;
+
+ RTR_UNLOCK(corep);
+ enable_irq();
+
+ /* And finally, give control to the next thread. */
thread_load_context(thread);
#ifdef RB_PROFILE
- profile_thread_started(thread->id & THREAD_ID_SLOT_MASK);
+ profile_thread_started(THREAD_ID_SLOT(thread->id));
#endif
-
}
/*---------------------------------------------------------------------------
* Sleeps a thread for at least a specified number of ticks with zero being
* a wait until the next tick.
*
- * INTERNAL: Intended for use by kernel and not for programs.
+ * INTERNAL: Intended for use by kernel and not programs.
*---------------------------------------------------------------------------
*/
void sleep_thread(int ticks)
{
- struct thread_entry *current = cores[CURRENT_CORE].running;
-
+ struct thread_entry *current = __running_self_entry();
LOCK_THREAD(current);
-
- /* Set our timeout, remove from run list and join timeout list. */
- current->tmo_tick = current_tick + MAX(ticks, 0) + 1;
- block_thread_on_l(current, STATE_SLEEPING);
-
+ prepare_block(current, STATE_SLEEPING, MAX(ticks, 0) + 1);
UNLOCK_THREAD(current);
}
@@ -1418,131 +1127,42 @@ void sleep_thread(int ticks)
* Block a thread on a blocking queue for explicit wakeup. If timeout is
* negative, the block is infinite.
*
- * INTERNAL: Intended for use by kernel objects and not for programs.
+ * INTERNAL: Intended for use by kernel and not programs.
*---------------------------------------------------------------------------
*/
-void block_thread(struct thread_entry *current, int timeout)
+void block_thread_(struct thread_entry *current, int timeout)
{
LOCK_THREAD(current);
- struct blocker *bl = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
- bl = current->blocker;
- struct thread_entry *blt = bl ? lock_blocker_thread(bl) : NULL;
-#endif /* HAVE_PRIORITY_SCHEDULING */
-
- if (LIKELY(timeout < 0))
- {
- /* Block until explicitly woken */
- block_thread_on_l(current, STATE_BLOCKED);
- }
- else
+ struct blocker *bl = current->blocker;
+ struct thread_entry *blt = NULL;
+ if (bl != NULL)
{
- /* Set the state to blocked with the specified timeout */
- current->tmo_tick = current_tick + timeout;
- block_thread_on_l(current, STATE_BLOCKED_W_TMO);
+ current->blocker = bl;
+ blt = lock_blocker_thread(bl);
}
+#endif /* HAVE_PRIORITY_SCHEDULING */
- if (bl == NULL)
- {
- UNLOCK_THREAD(current);
- return;
- }
+ wait_queue_register(current);
+ prepare_block(current, STATE_BLOCKED, timeout);
#ifdef HAVE_PRIORITY_SCHEDULING
- int newblpr = current->priority;
- UNLOCK_THREAD(current);
-
- if (newblpr >= bl->priority)
+ if (bl != NULL)
{
- unlock_blocker_thread(bl);
- return; /* Queue priority won't change */
- }
+ int newblpr = current->priority;
+ UNLOCK_THREAD(current);
- inherit_priority(bl, bl, blt, newblpr);
+ if (newblpr < bl->priority)
+ inherit_priority(bl, bl, blt, newblpr);
+ else
+ unlock_blocker_thread(bl); /* Queue priority won't change */
+ }
+ else
#endif /* HAVE_PRIORITY_SCHEDULING */
-}
-
-/*---------------------------------------------------------------------------
- * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
- *---------------------------------------------------------------------------
- */
-static void new_thread_id(unsigned int slot_num,
- struct thread_entry *thread)
-{
- unsigned int version =
- (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
- & THREAD_ID_VERSION_MASK;
-
- /* If wrapped to 0, make it 1 */
- if (version == 0)
- version = 1u << THREAD_ID_VERSION_SHIFT;
-
- thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
-}
-
-/*---------------------------------------------------------------------------
- * Find an empty thread slot or MAXTHREADS if none found. The slot returned
- * will be locked on multicore.
- *---------------------------------------------------------------------------
- */
-static struct thread_entry * find_empty_thread_slot(void)
-{
- /* Any slot could be on an interrupt-accessible list */
- IF_COP( int oldlevel = disable_irq_save(); )
- struct thread_entry *thread = NULL;
- int n;
-
- for (n = 0; n < MAXTHREADS; n++)
{
- /* Obtain current slot state - lock it on multicore */
- struct thread_entry *t = &threads[n];
- LOCK_THREAD(t);
-
- if (t->state == STATE_KILLED)
- {
- /* Slot is empty - leave it locked and caller will unlock */
- thread = t;
- break;
- }
-
- /* Finished examining slot - no longer busy - unlock on multicore */
- UNLOCK_THREAD(t);
+ UNLOCK_THREAD(current);
}
-
- IF_COP( restore_irq(oldlevel); ) /* Reenable interrups - this slot is
- not accesible to them yet */
- return thread;
-}
-
-/*---------------------------------------------------------------------------
- * Return the thread_entry pointer for a thread_id. Return the current
- * thread if the ID is (unsigned int)-1 (alias for current).
- *---------------------------------------------------------------------------
- */
-struct thread_entry * thread_id_entry(unsigned int thread_id)
-{
- return &threads[thread_id & THREAD_ID_SLOT_MASK];
-}
-
-/*---------------------------------------------------------------------------
- * Return the thread id of the calling thread
- * --------------------------------------------------------------------------
- */
-unsigned int thread_self(void)
-{
- return cores[CURRENT_CORE].running->id;
-}
-
-/*---------------------------------------------------------------------------
- * Return the thread entry of the calling thread.
- *
- * INTERNAL: Intended for use by kernel and not for programs.
- *---------------------------------------------------------------------------
- */
-struct thread_entry* thread_self_entry(void)
-{
- return cores[CURRENT_CORE].running;
}
/*---------------------------------------------------------------------------
@@ -1552,9 +1172,8 @@ struct thread_entry* thread_self_entry(void)
*/
void core_idle(void)
{
- IF_COP( const unsigned int core = CURRENT_CORE; )
disable_irq();
- core_sleep(IF_COP(core));
+ core_sleep(IF_COP(CURRENT_CORE));
}
/*---------------------------------------------------------------------------
@@ -1570,141 +1189,64 @@ unsigned int create_thread(void (*function)(void),
IF_PRIO(, int priority)
IF_COP(, unsigned int core))
{
- unsigned int i;
- unsigned int stack_words;
- uintptr_t stackptr, stackend;
- struct thread_entry *thread;
- unsigned state;
- int oldlevel;
-
- thread = find_empty_thread_slot();
+ struct thread_entry *thread = thread_alloc();
if (thread == NULL)
- {
return 0;
- }
-
- oldlevel = disable_irq_save();
-
- /* Munge the stack to make it easy to spot stack overflows */
- stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
- stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
- stack_size = stackend - stackptr;
- stack_words = stack_size / sizeof (uintptr_t);
- for (i = 0; i < stack_words; i++)
- {
- ((uintptr_t *)stackptr)[i] = DEADBEEF;
- }
+ new_thread_base_init(thread, &stack, &stack_size, name
+ IF_PRIO(, priority) IF_COP(, core));
- /* Store interesting information */
- thread->name = name;
- thread->stack = (uintptr_t *)stackptr;
- thread->stack_size = stack_size;
- thread->queue = NULL;
-#ifdef HAVE_WAKEUP_EXT_CB
- thread->wakeup_ext_cb = NULL;
-#endif
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
- thread->cpu_boost = 0;
-#endif
-#ifdef HAVE_PRIORITY_SCHEDULING
- memset(&thread->pdist, 0, sizeof(thread->pdist));
- thread->blocker = NULL;
- thread->base_priority = priority;
- thread->priority = priority;
- thread->skip_count = priority;
- prio_add_entry(&thread->pdist, priority);
-#endif
+ unsigned int stack_words = stack_size / sizeof (uintptr_t);
+ if (stack_words == 0)
+ return 0;
-#ifdef HAVE_IO_PRIORITY
- /* Default to high (foreground) priority */
- thread->io_priority = IO_PRIORITY_IMMEDIATE;
-#endif
+ /* Munge the stack to make it easy to spot stack overflows */
+ for (unsigned int i = 0; i < stack_words; i++)
+ ((uintptr_t *)stack)[i] = DEADBEEF;
#if NUM_CORES > 1
- thread->core = core;
-
/* Writeback stack munging or anything else before starting */
if (core != CURRENT_CORE)
- {
commit_dcache();
- }
#endif
- /* Thread is not on any timeout list but be a bit paranoid */
- thread->tmo.prev = NULL;
-
- state = (flags & CREATE_THREAD_FROZEN) ?
- STATE_FROZEN : STATE_RUNNING;
-
- thread->context.sp = (typeof (thread->context.sp))stackend;
-
- /* Load the thread's context structure with needed startup information */
+ thread->context.sp = (typeof (thread->context.sp))(stack + stack_size);
THREAD_STARTUP_INIT(core, thread, function);
- thread->state = state;
- i = thread->id; /* Snapshot while locked */
+ int oldlevel = disable_irq_save();
+ LOCK_THREAD(thread);
+
+ thread->state = STATE_FROZEN;
- if (state == STATE_RUNNING)
+ if (!(flags & CREATE_THREAD_FROZEN))
core_schedule_wakeup(thread);
+ unsigned int id = thread->id; /* Snapshot while locked */
+
UNLOCK_THREAD(thread);
restore_irq(oldlevel);
- return i;
+ return id;
}
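/* [Editor's sketch, not part of this patch.] The DEADBEEF fill in
 * create_thread() above is a watermark: because the stack grows downward on
 * these targets, the low end of the stack stays untouched until the thread
 * actually reaches that depth, so peak usage can be estimated later by
 * scanning for the first overwritten word.  The function name below is
 * illustrative only. */
static unsigned int stack_usage_percent(const uintptr_t *stack, size_t size)
{
    size_t words = size / sizeof (uintptr_t);
    size_t unused = 0;

    /* Count untouched words from the bottom; the first non-DEADBEEF word
     * marks the deepest point the stack ever reached. */
    while (unused < words && stack[unused] == DEADBEEF)
        unused++;

    return words ? (unsigned int)(100 * (words - unused) / words) : 0;
}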
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
-/*---------------------------------------------------------------------------
- * Change the boost state of a thread boosting or unboosting the CPU
- * as required.
- *---------------------------------------------------------------------------
- */
-static inline void boost_thread(struct thread_entry *thread, bool boost)
-{
- if ((thread->cpu_boost != 0) != boost)
- {
- thread->cpu_boost = boost;
- cpu_boost(boost);
- }
-}
-
-void trigger_cpu_boost(void)
-{
- struct thread_entry *current = cores[CURRENT_CORE].running;
- boost_thread(current, true);
-}
-
-void cancel_cpu_boost(void)
-{
- struct thread_entry *current = cores[CURRENT_CORE].running;
- boost_thread(current, false);
-}
-#endif /* HAVE_SCHEDULER_BOOSTCTRL */
-
/*---------------------------------------------------------------------------
* Block the current thread until another thread terminates. A thread may
- * wait on itself to terminate which prevents it from running again and it
- * will need to be killed externally.
+ * wait on itself to terminate, but that will deadlock.
+ *
* Parameter is the ID as returned from create_thread().
*---------------------------------------------------------------------------
*/
void thread_wait(unsigned int thread_id)
{
- struct thread_entry *current = cores[CURRENT_CORE].running;
- struct thread_entry *thread = thread_id_entry(thread_id);
+ struct thread_entry *current = __running_self_entry();
+ struct thread_entry *thread = __thread_id_entry(thread_id);
- /* Lock thread-as-waitable-object lock */
corelock_lock(&thread->waiter_cl);
- /* Be sure it hasn't been killed yet */
if (thread->id == thread_id && thread->state != STATE_KILLED)
{
- IF_COP( current->obj_cl = &thread->waiter_cl; )
- current->bqp = &thread->queue;
-
disable_irq();
- block_thread(current, TIMEOUT_BLOCK);
+ block_thread(current, TIMEOUT_BLOCK, &thread->queue, NULL);
corelock_unlock(&thread->waiter_cl);
@@ -1716,36 +1258,35 @@ void thread_wait(unsigned int thread_id)
}
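/* [Editor's sketch, not part of this patch.] A typical join pattern built on
 * the API above: create a worker, then block in thread_wait() until it
 * terminates.  Stack size, priority and the worker itself are placeholders;
 * the worker ends by calling thread_exit(), which wakes any waiters. */
static long worker_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void worker(void)
{
    /* ... do the work ... */
    thread_exit();   /* terminate and wake threads blocked in thread_wait() */
}

static void run_and_join(void)
{
    unsigned int id = create_thread(worker, worker_stack,
                                    sizeof(worker_stack), 0, "worker"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    if (id != 0)
        thread_wait(id);   /* returns once the worker has exited */
}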
/*---------------------------------------------------------------------------
- * Exit the current thread. The Right Way to Do Things (TM).
+ * Exit the current thread
*---------------------------------------------------------------------------
*/
-/* This is done to foil optimizations that may require the current stack,
- * such as optimizing subexpressions that put variables on the stack that
- * get used after switching stacks. */
-#if NUM_CORES > 1
-/* Called by ASM stub */
-static void thread_final_exit_do(struct thread_entry *current)
-#else
-/* No special procedure is required before calling */
-static inline void thread_final_exit(struct thread_entry *current)
-#endif
+static USED_ATTR NORETURN_ATTR
+void thread_exit_final(struct thread_entry *current)
{
- /* At this point, this thread isn't using resources allocated for
- * execution except the slot itself. */
+ /* Slot is no longer this thread */
+ new_thread_id(current);
+ current->name = NULL;
- /* Signal this thread */
- thread_queue_wake(&current->queue);
+ /* No longer using resources from creator */
+ wait_queue_wake(&current->queue);
+
+ UNLOCK_THREAD(current);
corelock_unlock(&current->waiter_cl);
+
+ thread_free(current);
+
switch_thread();
+
/* This should never and must never be reached - if it is, the
* state is corrupted */
THREAD_PANICF("thread_exit->K:*R", current);
- while (1);
}
void thread_exit(void)
{
- register struct thread_entry * current = cores[CURRENT_CORE].running;
+ struct core_entry *corep = __core_id_entry(CURRENT_CORE);
+ register struct thread_entry *current = corep->running;
/* Cancel CPU boost if any */
cancel_cpu_boost();
@@ -1764,24 +1305,21 @@ void thread_exit(void)
thread_panicf("abandon ship!", current);
#endif /* HAVE_PRIORITY_SCHEDULING */
- if (current->tmo.prev != NULL)
- {
- /* Cancel pending timeout list removal */
- remove_from_list_tmo(current);
- }
-
- /* Switch tasks and never return */
- block_thread_on_l(current, STATE_KILLED);
-
- /* Slot must be unusable until thread is really gone */
- UNLOCK_THREAD_AT_TASK_SWITCH(current);
+ /* Remove from scheduler lists */
+ tmo_queue_remove(&corep->tmo, current);
+ prepare_block(current, STATE_KILLED, -1);
+ corep->running = NULL; /* No switch_thread context save */
- /* Update ID for this slot */
- new_thread_id(current->id, current);
- current->name = NULL;
+#ifdef RB_PROFILE
+ profile_thread_stopped(THREAD_ID_SLOT(current->id));
+#endif
- /* Do final cleanup and remove the thread */
- thread_final_exit(current);
+ /* Do final release of resources and remove the thread */
+#if NUM_CORES > 1
+ thread_exit_finalize(current->core, current);
+#else
+ thread_exit_final(current);
+#endif
}
#ifdef HAVE_PRIORITY_SCHEDULING
@@ -1796,10 +1334,8 @@ int thread_set_priority(unsigned int thread_id, int priority)
return -1; /* Invalid priority argument */
int old_base_priority = -1;
- struct thread_entry *thread = thread_id_entry(thread_id);
+ struct thread_entry *thread = __thread_id_entry(thread_id);
- /* Thread could be on any list and therefore on an interrupt accessible
- one - disable interrupts */
const int oldlevel = disable_irq_save();
LOCK_THREAD(thread);
@@ -1825,7 +1361,7 @@ int thread_set_priority(unsigned int thread_id, int priority)
{
/* This thread is running - just change location on the run queue.
Also sets thread->priority. */
- set_running_thread_priority(thread, new_priority);
+ set_rtr_thread_priority(thread, new_priority);
goto done;
}
@@ -1838,7 +1374,7 @@ int thread_set_priority(unsigned int thread_id, int priority)
}
struct thread_entry *blt = lock_blocker_thread(bl);
- struct thread_entry **bqp = thread->bqp;
+ struct __wait_queue *wqp = wait_queue_ptr(thread);
thread->priority = new_priority;
@@ -1850,7 +1386,7 @@ int thread_set_priority(unsigned int thread_id, int priority)
if (new_priority < oldblpr)
newblpr = new_priority;
else if (old_priority <= oldblpr)
- newblpr = find_highest_priority_in_list_l(*bqp);
+ newblpr = wait_queue_find_priority(wqp);
if (newblpr == oldblpr)
{
@@ -1872,7 +1408,7 @@ done:
*/
int thread_get_priority(unsigned int thread_id)
{
- struct thread_entry *thread = thread_id_entry(thread_id);
+ struct thread_entry *thread = __thread_id_entry(thread_id);
int base_priority = thread->base_priority;
/* Simply check without locking slot. It may or may not be valid by the
@@ -1888,13 +1424,13 @@ int thread_get_priority(unsigned int thread_id)
#ifdef HAVE_IO_PRIORITY
int thread_get_io_priority(unsigned int thread_id)
{
- struct thread_entry *thread = thread_id_entry(thread_id);
+ struct thread_entry *thread = __thread_id_entry(thread_id);
return thread->io_priority;
}
void thread_set_io_priority(unsigned int thread_id,int io_priority)
{
- struct thread_entry *thread = thread_id_entry(thread_id);
+ struct thread_entry *thread = __thread_id_entry(thread_id);
thread->io_priority = io_priority;
}
#endif
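/* [Editor's sketch, not part of this patch.] The blocker-priority update in
 * thread_set_priority() above follows a simple rule; smaller values mean
 * higher priority, and 'waiters_highest' stands in for the result of
 * wait_queue_find_priority().  Names are illustrative only. */
static int recompute_blocker_priority(int old_blpr, int old_prio,
                                      int new_prio, int waiters_highest)
{
    if (new_prio < old_blpr)
        return new_prio;        /* raised above the blocker: inherit at once */
    else if (old_prio <= old_blpr)
        return waiters_highest; /* the donor was lowered: rescan the queue */
    else
        return old_blpr;        /* change cannot affect the blocker */
}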
@@ -1907,7 +1443,7 @@ void thread_set_io_priority(unsigned int thread_id,int io_priority)
*/
void thread_thaw(unsigned int thread_id)
{
- struct thread_entry *thread = thread_id_entry(thread_id);
+ struct thread_entry *thread = __thread_id_entry(thread_id);
int oldlevel = disable_irq_save();
LOCK_THREAD(thread);
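/* [Editor's sketch, not part of this patch.] CREATE_THREAD_FROZEN defers a
 * thread's first run until thread_thaw() releases it, which is handy when the
 * creator must publish state before the new thread may execute.  Reuses the
 * placeholder worker()/worker_stack from the earlier sketch. */
static void start_frozen_worker(void)
{
    unsigned int id = create_thread(worker, worker_stack,
                                    sizeof(worker_stack),
                                    CREATE_THREAD_FROZEN, "worker"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    if (id != 0)
    {
        /* ... set up data the worker will read on startup ... */
        thread_thaw(id);   /* now allow it to be scheduled */
    }
}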
@@ -1926,68 +1462,72 @@ void thread_thaw(unsigned int thread_id)
* Switch the processor that the currently executing thread runs on.
*---------------------------------------------------------------------------
*/
+static USED_ATTR NORETURN_ATTR
+void switch_core_final(unsigned int old_core, struct thread_entry *current)
+{
+ /* Old core won't be using slot resources at this point */
+ core_schedule_wakeup(current);
+ UNLOCK_THREAD(current);
+#ifdef RB_PROFILE
+ profile_thread_stopped(THREAD_ID_SLOT(current->id));
+#endif
+ switch_thread();
+ /* not reached */
+ THREAD_PANICF("switch_core_final->same core!", current);
+ (void)old_core;
+}
+
unsigned int switch_core(unsigned int new_core)
{
- const unsigned int core = CURRENT_CORE;
- struct thread_entry *current = cores[core].running;
+ const unsigned int old_core = CURRENT_CORE;
+ if (old_core == new_core)
+ return old_core; /* No change */
- if (core == new_core)
- {
- /* No change - just return same core */
- return core;
- }
+ struct core_entry *corep = __core_id_entry(old_core);
+ struct thread_entry *current = corep->running;
disable_irq();
LOCK_THREAD(current);
- /* Get us off the running list for the current core */
- RTR_LOCK(core);
- remove_from_list_l(&cores[core].running, current);
- rtr_subtract_entry(core, current->priority);
- RTR_UNLOCK(core);
-
- /* Stash return value (old core) in a safe place */
- current->retval = core;
-
- /* If a timeout hadn't yet been cleaned-up it must be removed now or
- * the other core will likely attempt a removal from the wrong list! */
- if (current->tmo.prev != NULL)
- {
- remove_from_list_tmo(current);
- }
+ /* Remove us from old core lists */
+ tmo_queue_remove(&corep->tmo, current);
+ core_rtr_remove(corep, current);
+ corep->running = NULL; /* No switch_thread context save */
- /* Change the core number for this thread slot */
+ /* Do the actual migration */
current->core = new_core;
+ switch_thread_core(old_core, current);
- /* Do not use core_schedule_wakeup here since this will result in
- * the thread starting to run on the other core before being finished on
- * this one. Delay the list unlock to keep the other core stuck
- * until this thread is ready. */
- RTR_LOCK(new_core);
-
- rtr_add_entry(new_core, current->priority);
- add_to_list_l(&cores[new_core].running, current);
-
- /* Make a callback into device-specific code, unlock the wakeup list so
- * that execution may resume on the new core, unlock our slot and finally
- * restore the interrupt level */
- cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
- cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
- cores[core].block_task = current;
-
- UNLOCK_THREAD(current);
+ /* Executing on new core */
+ return old_core;
+}
+#endif /* NUM_CORES > 1 */
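/* [Editor's sketch, not part of this patch.] Migrating the calling thread to
 * the coprocessor for a burst of work and back again, as on dual-core PP
 * targets; CPU/COP are the usual core indices and error handling is omitted. */
static void do_work_on_cop(void)
{
#if NUM_CORES > 1
    unsigned int prev = switch_core(COP);   /* returns the core we came from */
    /* ... this code now executes on COP ... */
    switch_core(prev);                      /* migrate back */
#endif
}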
- /* Alert other core to activity */
- core_wake(new_core);
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+/*---------------------------------------------------------------------------
+ * Change the boost state of a thread boosting or unboosting the CPU
+ * as required.
+ *---------------------------------------------------------------------------
+ */
+static inline void boost_thread(struct thread_entry *thread, bool boost)
+{
+ if ((thread->cpu_boost != 0) != boost)
+ {
+ thread->cpu_boost = boost;
+ cpu_boost(boost);
+ }
+}
- /* Do the stack switching, cache_maintenence and switch_thread call -
- requires native code */
- switch_thread_core(core, current);
+void trigger_cpu_boost(void)
+{
+ boost_thread(__running_self_entry(), true);
+}
- /* Finally return the old core to caller */
- return current->retval;
+void cancel_cpu_boost(void)
+{
+ boost_thread(__running_self_entry(), false);
}
-#endif /* NUM_CORES > 1 */
+#endif /* HAVE_SCHEDULER_BOOSTCTRL */
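/* [Editor's sketch, not part of this patch.] The boost flag is per-thread, so
 * each thread contributes at most one reference to cpu_boost(); a thread
 * simply brackets CPU-heavy sections with the calls above.  The function name
 * is a placeholder. */
static void decode_block(void)
{
    trigger_cpu_boost();     /* raise the clock while this thread crunches */
    /* ... CPU-intensive work ... */
    cancel_cpu_boost();      /* drop this thread's boost reference */
}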
/*---------------------------------------------------------------------------
* Initialize threading API. This assumes interrupts are not yet enabled. On
@@ -1998,127 +1538,56 @@ unsigned int switch_core(unsigned int new_core)
void INIT_ATTR init_threads(void)
{
const unsigned int core = CURRENT_CORE;
- struct thread_entry *thread;
if (core == CPU)
{
- /* Initialize core locks and IDs in all slots */
- int n;
- for (n = 0; n < MAXTHREADS; n++)
+ thread_alloc_init(); /* before using cores! */
+
+ /* Create main thread */
+ struct thread_entry *thread = thread_alloc();
+ if (thread == NULL)
{
- thread = &threads[n];
- corelock_init(&thread->waiter_cl);
- corelock_init(&thread->slot_cl);
- thread->id = THREAD_ID_INIT(n);
+ /* WTF? There really must be a slot available at this stage.
+ * This can fail if, for example, .bss isn't zero'ed out by the
+ * loader or threads is in the wrong section. */
+ THREAD_PANICF("init_threads->no slot", NULL);
}
- }
-
- /* CPU will initialize first and then sleep */
- thread = find_empty_thread_slot();
- if (thread == NULL)
- {
- /* WTF? There really must be a slot available at this stage.
- * This can fail if, for example, .bss isn't zero'ed out by the loader
- * or threads is in the wrong section. */
- THREAD_PANICF("init_threads->no slot", NULL);
- }
+ size_t stack_size;
+ void *stack = __get_main_stack(&stack_size);
+ new_thread_base_init(thread, &stack, &stack_size, __main_thread_name
+ IF_PRIO(, PRIORITY_MAIN_THREAD) IF_COP(, core));
- /* Initialize initially non-zero members of core */
- cores[core].next_tmo_check = current_tick; /* Something not in the past */
+ struct core_entry *corep = __core_id_entry(core);
+ core_rtr_add(corep, thread);
+ corep->running = thread;
- /* Initialize initially non-zero members of slot */
- UNLOCK_THREAD(thread); /* No sync worries yet */
- thread->name = main_thread_name;
- thread->state = STATE_RUNNING;
- IF_COP( thread->core = core; )
-#ifdef HAVE_PRIORITY_SCHEDULING
- corelock_init(&cores[core].rtr_cl);
- thread->base_priority = PRIORITY_USER_INTERFACE;
- prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
- thread->priority = PRIORITY_USER_INTERFACE;
- rtr_add_entry(core, PRIORITY_USER_INTERFACE);
+#ifdef INIT_MAIN_THREAD
+ init_main_thread(&thread->context);
#endif
+ }
- add_to_list_l(&cores[core].running, thread);
-
- if (core == CPU)
- {
- thread->stack = stackbegin;
- thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
-#if NUM_CORES > 1 /* This code path will not be run on single core targets */
- /* Wait for other processors to finish their inits since create_thread
- * isn't safe to call until the kernel inits are done. The first
- * threads created in the system must of course be created by CPU.
- * Another possible approach is to initialize all cores and slots
- * for each core by CPU, let the remainder proceed in parallel and
- * signal CPU when all are finished. */
- core_thread_init(CPU);
- }
- else
+#if NUM_CORES > 1
+ /* Boot CPU:
+ * Wait for other processors to finish their inits since create_thread
+ * isn't safe to call until the kernel inits are done. The first
+ * threads created in the system must of course be created by CPU.
+ * Another possible approach is to initialize all cores and slots
+ * for each core by CPU, let the remainder proceed in parallel and
+ * signal CPU when all are finished.
+ *
+ * Other:
+ * After the last processor completes, it should signal all others to
+ * proceed or may signal the next and call thread_exit(). The last one
+ * to finish will signal CPU.
+ */
+ core_thread_init(core);
+
+ if (core != CPU)
{
- /* Initial stack is the idle stack */
- thread->stack = idle_stacks[core];
- thread->stack_size = IDLE_STACK_SIZE;
- /* After last processor completes, it should signal all others to
- * proceed or may signal the next and call thread_exit(). The last one
- * to finish will signal CPU. */
- core_thread_init(core);
- /* Other cores do not have a main thread - go idle inside switch_thread
- * until a thread can run on the core. */
- thread_exit();
-#endif /* NUM_CORES */
+ /* No main thread on coprocessors - go idle and wait */
+ switch_thread();
+ THREAD_PANICF("init_threads() - coprocessor returned", NULL);
}
-#ifdef INIT_MAIN_THREAD
- init_main_thread(&thread->context);
-#endif
-}
-
-/* Unless otherwise defined, do nothing */
-#ifndef YIELD_KERNEL_HOOK
-#define YIELD_KERNEL_HOOK() false
-#endif
-#ifndef SLEEP_KERNEL_HOOK
-#define SLEEP_KERNEL_HOOK(ticks) false
-#endif
-
-/*---------------------------------------------------------------------------
- * Suspends a thread's execution for at least the specified number of ticks.
- *
- * May result in CPU core entering wait-for-interrupt mode if no other thread
- * may be scheduled.
- *
- * NOTE: sleep(0) sleeps until the end of the current tick
- * sleep(n) that doesn't result in rescheduling:
- * n <= ticks suspended < n + 1
- * n to n+1 is a lower bound. Other factors may affect the actual time
- * a thread is suspended before it runs again.
- *---------------------------------------------------------------------------
- */
-unsigned sleep(unsigned ticks)
-{
- /* In certain situations, certain bootloaders in particular, a normal
- * threading call is inappropriate. */
- if (SLEEP_KERNEL_HOOK(ticks))
- return 0; /* Handled */
-
- disable_irq();
- sleep_thread(ticks);
- switch_thread();
- return 0;
-}
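/* [Editor's sketch, not part of this patch.] A worked example of the bound
 * documented above: with HZ = 100, sleep(2) suspends the caller for at least
 * 2 ticks (20 ms) and, barring higher-priority activity, for less than
 * 3 ticks; sleep(0) merely gives up the remainder of the current tick. */
static void poll_every_100ms(void)
{
    while (1)
    {
        /* ... poll the hardware ... */
        sleep(HZ / 10);   /* at least 10 ticks, typically just over */
    }
}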
-
-/*---------------------------------------------------------------------------
- * Elects another thread to run or, if no other thread may be made ready to
- * run, immediately returns control back to the calling thread.
- *---------------------------------------------------------------------------
- */
-void yield(void)
-{
- /* In certain situations, certain bootloaders in particular, a normal
- * threading call is inappropriate. */
- if (YIELD_KERNEL_HOOK())
- return; /* handled */
-
- switch_thread();
+#endif /* NUM_CORES */
}