summaryrefslogtreecommitdiff
path: root/firmware/kernel/thread-common.c
diff options
context:
space:
mode:
Diffstat (limited to 'firmware/kernel/thread-common.c')
-rw-r--r--firmware/kernel/thread-common.c247
1 files changed, 213 insertions, 34 deletions
diff --git a/firmware/kernel/thread-common.c b/firmware/kernel/thread-common.c
index b8b8ffb..aad6610 100644
--- a/firmware/kernel/thread-common.c
+++ b/firmware/kernel/thread-common.c
@@ -18,39 +18,222 @@
* KIND, either express or implied.
*
****************************************************************************/
-#include "thread-internal.h"
+#include "kernel-internal.h"
#include "system.h"
+/* Unless otherwise defined, do nothing */
+#ifndef YIELD_KERNEL_HOOK
+#define YIELD_KERNEL_HOOK() false
+#endif
+#ifndef SLEEP_KERNEL_HOOK
+#define SLEEP_KERNEL_HOOK(ticks) false
+#endif
+
+const char __main_thread_name_str[] = "main";
+
+/* Array indexing is more efficient in inlines if the elements are a native
+ word size (100s of bytes fewer instructions) */
+
+#if NUM_CORES > 1
+static struct core_entry __core_entries[NUM_CORES] IBSS_ATTR;
+struct core_entry *__cores[NUM_CORES] IBSS_ATTR;
+#else
+struct core_entry __cores[NUM_CORES] IBSS_ATTR;
+#endif
+
+static struct thread_entry __thread_entries[MAXTHREADS] IBSS_ATTR;
+struct thread_entry *__threads[MAXTHREADS] IBSS_ATTR;
+
+
+/** Internal functions **/
+
/*---------------------------------------------------------------------------
- * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
- * from each operation or THREAD_NONE of nothing was awakened. Object owning
- * the queue must be locked first.
- *
- * INTERNAL: Intended for use by kernel objects and not for programs.
+ * Find an empty thread slot or NULL if none found. The slot returned will
+ * be locked on multicore.
*---------------------------------------------------------------------------
*/
-unsigned int thread_queue_wake(struct thread_entry **list)
+static struct threadalloc
{
- unsigned result = THREAD_NONE;
+ threadbit_t avail;
+#if NUM_CORES > 1
+ struct corelock cl;
+#endif
+} threadalloc SHAREDBSS_ATTR;
+
+/*---------------------------------------------------------------------------
+ * Initialize the thread allocator
+ *---------------------------------------------------------------------------
+ */
+void thread_alloc_init(void)
+{
+ corelock_init(&threadalloc.cl);
- for (;;)
+ for (unsigned int core = 0; core < NUM_CORES; core++)
{
- unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
+ #if NUM_CORES > 1
+ struct core_entry *c = &__core_entries[core];
+ __cores[core] = c;
+ #else
+ struct core_entry *c = &__cores[core];
+ #endif
+ rtr_queue_init(&c->rtr);
+ corelock_init(&c->rtr_cl);
+ tmo_queue_init(&c->tmo);
+ c->next_tmo_check = current_tick; /* Something not in the past */
+ }
- if (rc == THREAD_NONE)
- break; /* No more threads */
+ for (unsigned int slotnum = 0; slotnum < MAXTHREADS; slotnum++)
+ {
+ struct thread_entry *t = &__thread_entries[slotnum];
+ __threads[slotnum] = t;
+ corelock_init(&t->waiter_cl);
+ corelock_init(&t->slot_cl);
+ t->id = THREAD_ID_INIT(slotnum);
+ threadbit_set_bit(&threadalloc.avail, slotnum);
+ }
+}
+
+/*---------------------------------------------------------------------------
+ * Allocate a thread slot
+ *---------------------------------------------------------------------------
+ */
+struct thread_entry * thread_alloc(void)
+{
+ struct thread_entry *thread = NULL;
- result |= rc;
+ corelock_lock(&threadalloc.cl);
+
+ unsigned int slotnum = threadbit_ffs(&threadalloc.avail);
+ if (slotnum < MAXTHREADS)
+ {
+ threadbit_clear_bit(&threadalloc.avail, slotnum);
+ thread = __threads[slotnum];
}
+ corelock_unlock(&threadalloc.cl);
+
+ return thread;
+}
+
+/*---------------------------------------------------------------------------
+ * Free the thread slot of 'thread'
+ *---------------------------------------------------------------------------
+ */
+void thread_free(struct thread_entry *thread)
+{
+ corelock_lock(&threadalloc.cl);
+ threadbit_set_bit(&threadalloc.avail, THREAD_ID_SLOT(thread->id));
+ corelock_unlock(&threadalloc.cl);
+}
+
+/*---------------------------------------------------------------------------
+ * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
+ *---------------------------------------------------------------------------
+ */
+void new_thread_id(struct thread_entry *thread)
+{
+ uint32_t id = thread->id + (1u << THREAD_ID_VERSION_SHIFT);
+
+ /* If wrapped to 0, make it 1 */
+ if ((id & THREAD_ID_VERSION_MASK) == 0)
+ id |= (1u << THREAD_ID_VERSION_SHIFT);
+
+ thread->id = id;
+}
+
+/*---------------------------------------------------------------------------
+ * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
+ * from each operation or THREAD_NONE if nothing was awakened.
+ *---------------------------------------------------------------------------
+ */
+unsigned int wait_queue_wake(struct __wait_queue *wqp)
+{
+ unsigned result = THREAD_NONE;
+ struct thread_entry *thread;
+
+ while ((thread = WQ_THREAD_FIRST(wqp)))
+ result |= wakeup_thread(thread, WAKEUP_DEFAULT);
+
return result;
}
+/** Public functions **/
+
+#ifdef RB_PROFILE
+void profile_thread(void)
+{
+ profstart(THREAD_ID_SLOT(__running_self_entry()->id));
+}
+#endif
+
+/*---------------------------------------------------------------------------
+ * Return the thread id of the calling thread
+ * --------------------------------------------------------------------------
+ */
+unsigned int thread_self(void)
+{
+ return __running_self_entry()->id;
+}
+
+/*---------------------------------------------------------------------------
+ * Suspends a thread's execution for at least the specified number of ticks.
+ *
+ * May result in CPU core entering wait-for-interrupt mode if no other thread
+ * may be scheduled.
+ *
+ * NOTE: sleep(0) sleeps until the end of the current tick
+ * sleep(n) that doesn't result in rescheduling:
+ * n <= ticks suspended < n + 1
+ * n to n+1 is a lower bound. Other factors may affect the actual time
+ * a thread is suspended before it runs again.
+ *---------------------------------------------------------------------------
+ */
+unsigned sleep(unsigned ticks)
+{
+ /* In certain situations, certain bootloaders in particular, a normal
+ * threading call is inappropriate. */
+ if (SLEEP_KERNEL_HOOK(ticks))
+ return 0; /* Handled */
+
+ disable_irq();
+ sleep_thread(ticks);
+ switch_thread();
+ return 0;
+}
+
+/*---------------------------------------------------------------------------
+ * Elects another thread to run or, if no other thread may be made ready to
+ * run, immediately returns control back to the calling thread.
+ *---------------------------------------------------------------------------
+ */
+void yield(void)
+{
+ /* In certain situations, certain bootloaders in particular, a normal
+ * threading call is inappropriate. */
+ if (YIELD_KERNEL_HOOK())
+ return; /* Handled */
+
+ switch_thread();
+}
+
+
/** Debug screen stuff **/
+void format_thread_name(char *buf, size_t bufsize,
+ const struct thread_entry *thread)
+{
+ const char *name = thread->name;
+ if (!name)
+ name = "";
+
+ const char *fmt = *name ? "%s" : "%s%08lX";
+ snprintf(buf, bufsize, fmt, name, thread->id);
+}
+
+#ifndef HAVE_SDL_THREADS
/*---------------------------------------------------------------------------
- * returns the stack space used in bytes
+ * Returns the maximum percentage of the stack ever used during runtime.
*---------------------------------------------------------------------------
*/
static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
@@ -69,13 +252,9 @@ static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
return usage;
}
+#endif /* !HAVE_SDL_THREADS */
#if NUM_CORES > 1
-/*---------------------------------------------------------------------------
- * Returns the maximum percentage of the core's idle stack ever used during
- * runtime.
- *---------------------------------------------------------------------------
- */
int core_get_debug_info(unsigned int core, struct core_debug_info *infop)
{
extern uintptr_t * const idle_stacks[NUM_CORES];
@@ -105,29 +284,29 @@ int thread_get_debug_info(unsigned int thread_id,
if (!infop)
return -1;
- unsigned int slot = THREAD_ID_SLOT(thread_id);
- if (slot >= MAXTHREADS)
+ unsigned int slotnum = THREAD_ID_SLOT(thread_id);
+ if (slotnum >= MAXTHREADS)
return -1;
- extern struct thread_entry threads[MAXTHREADS];
- struct thread_entry *thread = &threads[slot];
+ struct thread_entry *thread = __thread_slot_entry(slotnum);
int oldlevel = disable_irq_save();
- LOCK_THREAD(thread);
+ corelock_lock(&threadalloc.cl);
+ corelock_lock(&thread->slot_cl);
unsigned int state = thread->state;
- if (state != STATE_KILLED)
- {
- const char *name = thread->name;
- if (!name)
- name = "";
+ int ret = 0;
+ if (threadbit_test_bit(&threadalloc.avail, slotnum) == 0)
+ {
bool cpu_boost = false;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
cpu_boost = thread->cpu_boost;
#endif
+#ifndef HAVE_SDL_THREADS
infop->stack_usage = stack_usage(thread->stack, thread->stack_size);
+#endif
#if NUM_CORES > 1
infop->core = thread->core;
#endif
@@ -140,13 +319,13 @@ int thread_get_debug_info(unsigned int thread_id,
cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '),
status_chars[state]);
- const char *fmt = *name ? "%s" : "%s%08lX";
- snprintf(infop->name, sizeof (infop->name), fmt, name,
- thread->id);
+ format_thread_name(infop->name, sizeof (infop->name), thread);
+ ret = 1;
}
- UNLOCK_THREAD(thread);
+ corelock_unlock(&thread->slot_cl);
+ corelock_unlock(&threadalloc.cl);
restore_irq(oldlevel);
- return state == STATE_KILLED ? 0 : 1;
+ return ret;
}