-rw-r--r--  apps/buffering.c                                      |    9
-rw-r--r--  apps/codecs.c                                         |   36
-rw-r--r--  apps/codecs.h                                         |   52
-rw-r--r--  apps/debug_menu.c                                     |   11
-rw-r--r--  apps/main.c                                           |    2
-rw-r--r--  apps/pcmbuf.c                                         |   23
-rw-r--r--  apps/playback.c                                       |   10
-rw-r--r--  apps/plugin.c                                         |    5
-rw-r--r--  apps/plugin.h                                         |   14
-rw-r--r--  apps/plugins/mpegplayer/audio_thread.c                |    6
-rw-r--r--  apps/plugins/mpegplayer/disk_buf.c                    |    4
-rw-r--r--  apps/plugins/mpegplayer/stream_mgr.c                  |    4
-rw-r--r--  apps/plugins/mpegplayer/video_thread.c                |    6
-rw-r--r--  apps/voice_thread.c                                   |    4
-rw-r--r--  firmware/SOURCES                                      |   12
-rw-r--r--  firmware/common/ffs.c                                 |   54
-rw-r--r--  firmware/drivers/ata.c                                |   50
-rw-r--r--  firmware/drivers/fat.c                                |    6
-rw-r--r--  firmware/export/config.h                              |   14
-rw-r--r--  firmware/export/kernel.h                              |   94
-rw-r--r--  firmware/export/system.h                              |   14
-rw-r--r--  firmware/export/thread.h                              |  371
-rw-r--r--  firmware/kernel.c                                     | 1329
-rw-r--r--  firmware/pcm_record.c                                 |    7
-rw-r--r--  firmware/target/arm/ffs-arm.S                         |   74
-rw-r--r--  firmware/target/arm/i2c-pp.c                          |    2
-rw-r--r--  firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c  |    2
-rw-r--r--  firmware/target/arm/sandisk/ata-c200_e200.c           |    4
-rw-r--r--  firmware/target/coldfire/ffs-coldfire.S               |   62
-rw-r--r--  firmware/thread.c                                     | 2535
-rw-r--r--  uisimulator/sdl/SOURCES                               |    2
-rw-r--r--  uisimulator/sdl/kernel-sdl.c                          |  168
-rw-r--r--  uisimulator/sdl/kernel.c                              |  739
-rw-r--r--  uisimulator/sdl/system-sdl.h                          |    2
-rw-r--r--  uisimulator/sdl/thread-sdl.c                          |  372
-rw-r--r--  uisimulator/sdl/uisdl.c                               |   38
36 files changed, 3079 insertions, 3058 deletions
diff --git a/apps/buffering.c b/apps/buffering.c
index 64f522c..0cb428c 100644
--- a/apps/buffering.c
+++ b/apps/buffering.c
@@ -1446,16 +1446,21 @@ void buffering_thread(void)
void buffering_init(void) {
mutex_init(&llist_mutex);
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* This behavior is not safe at the moment */
+ mutex_set_preempt(&llist_mutex, false);
+#endif
conf_watermark = BUFFERING_DEFAULT_WATERMARK;
queue_init(&buffering_queue, true);
- queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list);
-
buffering_thread_p = create_thread( buffering_thread, buffering_stack,
sizeof(buffering_stack), CREATE_THREAD_FROZEN,
buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
IF_COP(, CPU));
+
+ queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
+ buffering_thread_p);
}
/* Initialise the buffering subsystem */
diff --git a/apps/codecs.c b/apps/codecs.c
index dfae463..f2c7452 100644
--- a/apps/codecs.c
+++ b/apps/codecs.c
@@ -76,6 +76,7 @@ struct codec_api ci = {
false, /* stop_codec */
0, /* new_track */
0, /* seek_time */
+ NULL, /* struct dsp_config *dsp */
NULL, /* get_codec_memory */
NULL, /* pcmbuf_insert */
NULL, /* set_elapsed */
@@ -95,6 +96,23 @@ struct codec_api ci = {
PREFIX(sleep),
yield,
+#if NUM_CORES > 1
+ create_thread,
+ thread_thaw,
+ thread_wait,
+ semaphore_init,
+ semaphore_wait,
+ semaphore_release,
+ event_init,
+ event_wait,
+ event_set_state,
+#endif
+
+#ifdef CACHE_FUNCTIONS_AS_CALL
+ flush_icache,
+ invalidate_icache,
+#endif
+
/* strings and memory */
strcpy,
strncpy,
@@ -147,24 +165,6 @@ struct codec_api ci = {
/* new stuff at the end, sort into place next time
the API gets incompatible */
-#ifdef CACHE_FUNCTIONS_AS_CALL
- flush_icache,
- invalidate_icache,
-#endif
-
- NULL, /* struct dsp_config *dsp */
-
-#if NUM_CORES > 1
- create_thread,
- thread_thaw,
- thread_wait,
- semaphore_init,
- semaphore_wait,
- semaphore_release,
- event_init,
- event_wait,
- event_set_state,
-#endif
};
void codec_get_full_path(char *path, const char *codec_root_fn)
diff --git a/apps/codecs.h b/apps/codecs.h
index ad6b831..fb5675f 100644
--- a/apps/codecs.h
+++ b/apps/codecs.h
@@ -80,12 +80,12 @@
#define CODEC_ENC_MAGIC 0x52454E43 /* RENC */
/* increase this every time the api struct changes */
-#define CODEC_API_VERSION 22
+#define CODEC_API_VERSION 23
/* update this to latest version if a change to the api struct breaks
backwards compatibility (and please take the opportunity to sort in any
new function which are "waiting" at the end of the function table) */
-#define CODEC_MIN_API_VERSION 22
+#define CODEC_MIN_API_VERSION 23
/* codec return codes */
enum codec_status {
@@ -118,6 +118,9 @@ struct codec_api {
/* If seek_time != 0, codec should seek to that song position (in ms)
if codec supports seeking. */
long seek_time;
+
+ /* The dsp instance to be used for audio output */
+ struct dsp_config *dsp;
/* Returns buffer to malloc array. Only codeclib should need this. */
void* (*get_codec_memory)(size_t *size);
@@ -160,6 +163,28 @@ struct codec_api {
void (*PREFIX(sleep))(int ticks);
void (*yield)(void);
+#if NUM_CORES > 1
+ struct thread_entry *
+ (*create_thread)(void (*function)(void), void* stack,
+ size_t stack_size, unsigned flags, const char *name
+ IF_PRIO(, int priority)
+ IF_COP(, unsigned int core));
+
+ void (*thread_thaw)(struct thread_entry *thread);
+ void (*thread_wait)(struct thread_entry *thread);
+ void (*semaphore_init)(struct semaphore *s, int max, int start);
+ void (*semaphore_wait)(struct semaphore *s);
+ void (*semaphore_release)(struct semaphore *s);
+ void (*event_init)(struct event *e, unsigned int flags);
+ void (*event_wait)(struct event *e, unsigned int for_state);
+ void (*event_set_state)(struct event *e, unsigned int state);
+#endif /* NUM_CORES */
+
+#ifdef CACHE_FUNCTIONS_AS_CALL
+ void (*flush_icache)(void);
+ void (*invalidate_icache)(void);
+#endif
+
/* strings and memory */
char* (*strcpy)(char *dst, const char *src);
char* (*strncpy)(char *dst, const char *src, size_t length);
@@ -218,29 +243,6 @@ struct codec_api {
/* new stuff at the end, sort into place next time
the API gets incompatible */
-#ifdef CACHE_FUNCTIONS_AS_CALL
- void (*flush_icache)(void);
- void (*invalidate_icache)(void);
-#endif
-
- struct dsp_config *dsp;
-
-#if NUM_CORES > 1
- struct thread_entry *
- (*create_thread)(void (*function)(void), void* stack,
- int stack_size, unsigned flags, const char *name
- IF_PRIO(, int priority)
- IF_COP(, unsigned int core));
-
- void (*thread_thaw)(struct thread_entry *thread);
- void (*thread_wait)(struct thread_entry *thread);
- void (*semaphore_init)(struct semaphore *s, int max, int start);
- void (*semaphore_wait)(struct semaphore *s);
- void (*semaphore_release)(struct semaphore *s);
- void (*event_init)(struct event *e, unsigned int flags);
- void (*event_wait)(struct event *e, unsigned int for_state);
- void (*event_set_state)(struct event *e, unsigned int state);
-#endif /* NUM_CORES */
};
/* codec header */
diff --git a/apps/debug_menu.c b/apps/debug_menu.c
index d865f12..fc509ce 100644
--- a/apps/debug_menu.c
+++ b/apps/debug_menu.c
@@ -127,11 +127,6 @@ static char thread_status_char(unsigned status)
[STATE_KILLED] = 'K',
};
-#if NUM_CORES > 1
- if (status == STATE_BUSY) /* Not a state index */
- return '.';
-#endif
-
if (status > THREAD_NUM_STATES)
status = THREAD_NUM_STATES;
@@ -166,15 +161,15 @@ static char* threads_getname(int selected_item, void * data, char *buffer)
thread_get_name(name, 32, thread);
snprintf(buffer, MAX_PATH,
- "%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d ") "%2d%% %s",
+ "%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d %d ") "%2d%% %s",
selected_item,
IF_COP(thread->core,)
#ifdef HAVE_SCHEDULER_BOOSTCTRL
- (thread->boosted) ? '+' :
+ (thread->cpu_boost) ? '+' :
#endif
((thread->state == STATE_RUNNING) ? '*' : ' '),
thread_status_char(thread->state),
- IF_PRIO(thread->priority,)
+ IF_PRIO(thread->base_priority, thread->priority, )
thread_stack_usage(thread), name);
return buffer;
diff --git a/apps/main.c b/apps/main.c
index 5dd92e5..a3a2241 100644
--- a/apps/main.c
+++ b/apps/main.c
@@ -270,7 +270,7 @@ static void init_tagcache(void)
static void init(void)
{
- init_threads();
+ kernel_init();
buffer_init();
set_irq_level(0);
lcd_init();
diff --git a/apps/pcmbuf.c b/apps/pcmbuf.c
index 8153118..8f16c90 100644
--- a/apps/pcmbuf.c
+++ b/apps/pcmbuf.c
@@ -116,7 +116,7 @@ static bool low_latency_mode = false;
static bool pcmbuf_flush;
#ifdef HAVE_PRIORITY_SCHEDULING
-static int codec_thread_priority = 0;
+static int codec_thread_priority = PRIORITY_PLAYBACK;
#endif
extern struct thread_entry *codec_thread_p;
@@ -256,18 +256,21 @@ static void boost_codec_thread(bool boost)
* will starve if the codec thread's priority is boosted. */
if (boost)
{
- if (codec_thread_priority == 0)
+ int priority = (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX)*pcmbuf_unplayed_bytes
+ / (2*NATIVE_FREQUENCY) + PRIORITY_PLAYBACK_MAX;
+
+ if (priority != codec_thread_priority)
{
- codec_thread_priority = thread_set_priority(
- codec_thread_p, PRIORITY_REALTIME);
- voice_thread_set_priority(PRIORITY_REALTIME);
+ codec_thread_priority = priority;
+ thread_set_priority(codec_thread_p, priority);
+ voice_thread_set_priority(priority);
}
}
- else if (codec_thread_priority != 0)
+ else if (codec_thread_priority != PRIORITY_PLAYBACK)
{
- thread_set_priority(codec_thread_p, codec_thread_priority);
- voice_thread_set_priority(codec_thread_priority);
- codec_thread_priority = 0;
+ thread_set_priority(codec_thread_p, PRIORITY_PLAYBACK);
+ voice_thread_set_priority(PRIORITY_PLAYBACK);
+ codec_thread_priority = PRIORITY_PLAYBACK;
}
}
#endif /* HAVE_PRIORITY_SCHEDULING */
@@ -818,7 +821,7 @@ static bool prepare_insert(size_t length)
if (low_latency_mode)
{
/* 1/4s latency. */
- if (pcmbuf_unplayed_bytes > NATIVE_FREQUENCY * 4 / 4
+ if (pcmbuf_unplayed_bytes > NATIVE_FREQUENCY * 4 / 2
&& pcm_is_playing())
return false;
}
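
boost_codec_thread() above replaces the old all-or-nothing REALTIME boost with a linear interpolation between PRIORITY_PLAYBACK_MAX and PRIORITY_PLAYBACK, driven by how much PCM data remains buffered. A standalone sketch of that mapping, assuming NATIVE_FREQUENCY == 44100 and the values this commit sets in thread.h; boost_priority() is a hypothetical name, not part of the patch:

#include <stddef.h>
#include <stdio.h>

#define PRIORITY_PLAYBACK      16
#define PRIORITY_PLAYBACK_MAX   5
#define NATIVE_FREQUENCY    44100

/* Same arithmetic as boost_codec_thread() in the hunk above */
static int boost_priority(size_t unplayed_bytes)
{
    return (PRIORITY_PLAYBACK - PRIORITY_PLAYBACK_MAX)*unplayed_bytes
               / (2*NATIVE_FREQUENCY) + PRIORITY_PLAYBACK_MAX;
}

int main(void)
{
    printf("%d\n", boost_priority(0));     /* 5  - strongest boost */
    printf("%d\n", boost_priority(44100)); /* 10 - about 1/4 s buffered */
    printf("%d\n", boost_priority(88200)); /* 16 - normal playback prio */
    return 0;
}

Since lower numbers mean higher priority in the new scheme, the boost is strongest when the buffer is nearly empty and fades out entirely once roughly half a second of audio is buffered.
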
diff --git a/apps/playback.c b/apps/playback.c
index 7eecd23..9005b34 100644
--- a/apps/playback.c
+++ b/apps/playback.c
@@ -2549,9 +2549,7 @@ void audio_init(void)
to send messages. Thread creation will be delayed however so nothing
starts running until ready if something yields such as talk_init. */
queue_init(&audio_queue, true);
- queue_enable_queue_send(&audio_queue, &audio_queue_sender_list);
queue_init(&codec_queue, false);
- queue_enable_queue_send(&codec_queue, &codec_queue_sender_list);
queue_init(&pcmbuf_queue, false);
pcm_init();
@@ -2587,11 +2585,17 @@ void audio_init(void)
codec_thread_name IF_PRIO(, PRIORITY_PLAYBACK)
IF_COP(, CPU));
+ queue_enable_queue_send(&codec_queue, &codec_queue_sender_list,
+ codec_thread_p);
+
audio_thread_p = create_thread(audio_thread, audio_stack,
sizeof(audio_stack), CREATE_THREAD_FROZEN,
- audio_thread_name IF_PRIO(, PRIORITY_SYSTEM)
+ audio_thread_name IF_PRIO(, PRIORITY_USER_INTERFACE)
IF_COP(, CPU));
+ queue_enable_queue_send(&audio_queue, &audio_queue_sender_list,
+ audio_thread_p);
+
#ifdef PLAYBACK_VOICE
voice_thread_init();
#endif
diff --git a/apps/plugin.c b/apps/plugin.c
index 57f836c..db9bd25 100644
--- a/apps/plugin.c
+++ b/apps/plugin.c
@@ -253,15 +253,12 @@ static const struct plugin_api rockbox_api = {
/* kernel/ system */
PREFIX(sleep),
yield,
-#ifdef HAVE_PRIORITY_SCHEDULING
- priority_yield,
-#endif
&current_tick,
default_event_handler,
default_event_handler_ex,
threads,
create_thread,
- remove_thread,
+ thread_exit,
thread_wait,
#if (CONFIG_CODEC == SWCODEC)
mutex_init,
diff --git a/apps/plugin.h b/apps/plugin.h
index cd42656..5762473 100644
--- a/apps/plugin.h
+++ b/apps/plugin.h
@@ -119,12 +119,12 @@
#define PLUGIN_MAGIC 0x526F634B /* RocK */
/* increase this every time the api struct changes */
-#define PLUGIN_API_VERSION 100
+#define PLUGIN_API_VERSION 101
/* update this to latest version if a change to the api struct breaks
backwards compatibility (and please take the opportunity to sort in any
new function which are "waiting" at the end of the function table) */
-#define PLUGIN_MIN_API_VERSION 100
+#define PLUGIN_MIN_API_VERSION 101
/* plugin return codes */
enum plugin_status {
@@ -351,19 +351,16 @@ struct plugin_api {
/* kernel/ system */
void (*PREFIX(sleep))(int ticks);
void (*yield)(void);
-#ifdef HAVE_PRIORITY_SCHEDULING
- void (*priority_yield)(void);
-#endif
volatile long* current_tick;
long (*default_event_handler)(long event);
long (*default_event_handler_ex)(long event, void (*callback)(void *), void *parameter);
struct thread_entry* threads;
struct thread_entry* (*create_thread)(void (*function)(void), void* stack,
- int stack_size, unsigned flags,
+ size_t stack_size, unsigned flags,
const char *name
IF_PRIO(, int priority)
IF_COP(, unsigned int core));
- void (*remove_thread)(struct thread_entry *thread);
+ void (*thread_exit)(void);
void (*thread_wait)(struct thread_entry *thread);
#if CONFIG_CODEC == SWCODEC
void (*mutex_init)(struct mutex *m);
@@ -405,7 +402,8 @@ struct plugin_api {
int ticks);
#if CONFIG_CODEC == SWCODEC
void (*queue_enable_queue_send)(struct event_queue *q,
- struct queue_sender_list *send);
+ struct queue_sender_list *send,
+ struct thread_entry *owner);
bool (*queue_empty)(const struct event_queue *q);
void (*queue_wait)(struct event_queue *q, struct queue_event *ev);
intptr_t (*queue_send)(struct event_queue *q, long id,
diff --git a/apps/plugins/mpegplayer/audio_thread.c b/apps/plugins/mpegplayer/audio_thread.c
index 2bb766a..7d2f849 100644
--- a/apps/plugins/mpegplayer/audio_thread.c
+++ b/apps/plugins/mpegplayer/audio_thread.c
@@ -714,12 +714,14 @@ bool audio_thread_init(void)
/* Start the audio thread */
audio_str.hdr.q = &audio_str_queue;
rb->queue_init(audio_str.hdr.q, false);
- rb->queue_enable_queue_send(audio_str.hdr.q, &audio_str_queue_send);
/* One-up on the priority since the core DSP over-yields internally */
audio_str.thread = rb->create_thread(
audio_thread, audio_stack, audio_stack_size, 0,
- "mpgaudio" IF_PRIO(,PRIORITY_PLAYBACK-1) IF_COP(, CPU));
+ "mpgaudio" IF_PRIO(,PRIORITY_PLAYBACK-4) IF_COP(, CPU));
+
+ rb->queue_enable_queue_send(audio_str.hdr.q, &audio_str_queue_send,
+ audio_str.thread);
if (audio_str.thread == NULL)
return false;
diff --git a/apps/plugins/mpegplayer/disk_buf.c b/apps/plugins/mpegplayer/disk_buf.c
index a408b90..289918f 100644
--- a/apps/plugins/mpegplayer/disk_buf.c
+++ b/apps/plugins/mpegplayer/disk_buf.c
@@ -837,7 +837,6 @@ bool disk_buf_init(void)
disk_buf.q = &disk_buf_queue;
rb->queue_init(disk_buf.q, false);
- rb->queue_enable_queue_send(disk_buf.q, &disk_buf_queue_send);
disk_buf.state = TSTATE_EOS;
disk_buf.status = STREAM_STOPPED;
@@ -886,6 +885,9 @@ bool disk_buf_init(void)
disk_buf_thread, disk_buf_stack, sizeof(disk_buf_stack), 0,
"mpgbuffer" IF_PRIO(, PRIORITY_BUFFERING) IF_COP(, CPU));
+ rb->queue_enable_queue_send(disk_buf.q, &disk_buf_queue_send,
+ disk_buf.thread);
+
if (disk_buf.thread == NULL)
return false;
diff --git a/apps/plugins/mpegplayer/stream_mgr.c b/apps/plugins/mpegplayer/stream_mgr.c
index 9da664e..b962c5b 100644
--- a/apps/plugins/mpegplayer/stream_mgr.c
+++ b/apps/plugins/mpegplayer/stream_mgr.c
@@ -987,7 +987,6 @@ int stream_init(void)
stream_mgr.q = &stream_mgr_queue;
rb->queue_init(stream_mgr.q, false);
- rb->queue_enable_queue_send(stream_mgr.q, &stream_mgr_queue_send);
/* sets audiosize and returns buffer pointer */
mem = rb->plugin_get_audio_buffer(&memsize);
@@ -1028,6 +1027,9 @@ int stream_init(void)
stream_mgr_thread_stack, sizeof(stream_mgr_thread_stack),
0, "mpgstream_mgr" IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU));
+ rb->queue_enable_queue_send(stream_mgr.q, &stream_mgr_queue_send,
+ stream_mgr.thread);
+
if (stream_mgr.thread == NULL)
{
rb->splash(HZ, "Could not create stream manager thread!");
diff --git a/apps/plugins/mpegplayer/video_thread.c b/apps/plugins/mpegplayer/video_thread.c
index 6508d28..d16eb77 100644
--- a/apps/plugins/mpegplayer/video_thread.c
+++ b/apps/plugins/mpegplayer/video_thread.c
@@ -955,7 +955,7 @@ static void video_thread(void)
else
{
/* Just a little left - spin and be accurate */
- rb->priority_yield();
+ rb->yield();
if (str_have_msg(&video_str))
goto message_wait;
}
@@ -998,13 +998,15 @@ bool video_thread_init(void)
video_str.hdr.q = &video_str_queue;
rb->queue_init(video_str.hdr.q, false);
- rb->queue_enable_queue_send(video_str.hdr.q, &video_str_queue_send);
/* We put the video thread on another processor for multi-core targets. */
video_str.thread = rb->create_thread(
video_thread, video_stack, VIDEO_STACKSIZE, 0,
"mpgvideo" IF_PRIO(,PRIORITY_PLAYBACK) IF_COP(, COP));
+ rb->queue_enable_queue_send(video_str.hdr.q, &video_str_queue_send,
+ video_str.thread);
+
if (video_str.thread == NULL)
return false;
diff --git a/apps/voice_thread.c b/apps/voice_thread.c
index 7bf52d4..6e70f43 100644
--- a/apps/voice_thread.c
+++ b/apps/voice_thread.c
@@ -424,12 +424,14 @@ void voice_thread_init(void)
{
logf("Starting voice thread");
queue_init(&voice_queue, false);
- queue_enable_queue_send(&voice_queue, &voice_queue_sender_list);
mutex_init(&voice_mutex);
event_init(&voice_event, STATE_SIGNALED | EVENT_MANUAL);
voice_thread_p = create_thread(voice_thread, voice_stack,
sizeof(voice_stack), CREATE_THREAD_FROZEN,
voice_thread_name IF_PRIO(, PRIORITY_PLAYBACK) IF_COP(, CPU));
+
+ queue_enable_queue_send(&voice_queue, &voice_queue_sender_list,
+ voice_thread_p);
} /* voice_thread_init */
/* Unfreeze the voice thread */
diff --git a/firmware/SOURCES b/firmware/SOURCES
index 6ef129f..0a8ac2a 100644
--- a/firmware/SOURCES
+++ b/firmware/SOURCES
@@ -9,11 +9,11 @@ usb.c
#ifdef ROCKBOX_HAS_LOGF
logf.c
#endif /* ROCKBOX_HAS_LOGF */
+kernel.c
#ifndef SIMULATOR
#ifdef RB_PROFILE
profile.c
#endif /* RB_PROFILE */
-kernel.c
rolo.c
thread.c
timer.c
@@ -274,6 +274,10 @@ target/sh/archos/descramble.S
#ifndef SIMULATOR
target/coldfire/crt0.S
+#ifdef HAVE_PRIORITY_SCHEDULING
+common/ffs.c
+target/coldfire/ffs-coldfire.S
+#endif
target/coldfire/memcpy-coldfire.S
target/coldfire/memmove-coldfire.S
target/coldfire/memset-coldfire.S
@@ -299,6 +303,9 @@ common/strlen.c
#ifndef SIMULATOR
target/arm/memset-arm.S
target/arm/memset16-arm.S
+#ifdef HAVE_PRIORITY_SCHEDULING
+target/arm/ffs-arm.S
+#endif
#if CONFIG_I2C == I2C_PP5024 || CONFIG_I2C == I2C_PP5020 || CONFIG_I2C == I2C_PP5002
target/arm/i2c-pp.c
#elif CONFIG_I2C == I2C_PNX0101
@@ -345,6 +352,9 @@ target/arm/crt0.S
#else
+#ifdef HAVE_PRIORITY_SCHEDULING
+common/ffs.c
+#endif
common/memcpy.c
common/memmove.c
common/memset.c
diff --git a/firmware/common/ffs.c b/firmware/common/ffs.c
new file mode 100644
index 0000000..e3dc9b0
--- /dev/null
+++ b/firmware/common/ffs.c
@@ -0,0 +1,54 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2008 by Michael Sevakis
+ *
+ * All files in this archive are subject to the GNU General Public License.
+ * See the file COPYING in the source tree root for full license agreement.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#include "config.h"
+#include <inttypes.h>
+
+/* find_first_set_bit() - this is a C version of the ffs algorithm devised
+ * by D.Seal and posted to comp.sys.arm on 16 Feb 1994.
+ *
+ * Find the index of the least significant set bit in the word.
+ * return values:
+ * 0 - bit 0 is set
+ * 1 - bit 1 is set
+ * ...
+ * 31 - bit 31 is set
+ * 32 - no bits set
+ */
+
+/* Table shared with assembly code */
+const uint8_t L_ffs_table[64] ICONST_ATTR =
+{
+/* 0 1 2 3 4 5 6 7 */
+/* ----------------------------------------- */
+ 32, 0, 1, 12, 2, 6, 0, 13, /* 0- 7 */
+ 3, 0, 7, 0, 0, 0, 0, 14, /* 8-15 */
+ 10, 4, 0, 0, 8, 0, 0, 25, /* 16-23 */
+ 0, 0, 0, 0, 0, 21, 27, 15, /* 24-31 */
+ 31, 11, 5, 0, 0, 0, 0, 0, /* 32-39 */
+ 9, 0, 0, 24, 0, 0, 20, 26, /* 40-47 */
+ 30, 0, 0, 0, 0, 23, 0, 19, /* 48-55 */
+ 29, 0, 22, 18, 28, 17, 16, 0, /* 56-63 */
+};
+
+#if !defined(CPU_COLDFIRE)
+int find_first_set_bit(uint32_t val)
+{
+ return L_ffs_table[((val & -val)*0x0450fbaf) >> 26];
+}
+#endif
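
The multiply-and-shift in find_first_set_bit() works because val & -val isolates the lowest set bit (a power of two), and multiplying by the constant hashes each of the 33 possible inputs to a distinct 6-bit table index. A standalone check of a few values; ffs_bit() and ffs_table are local copies of the routine and table above, renamed only so the sketch compiles on its own:

#include <assert.h>
#include <inttypes.h>

static const uint8_t ffs_table[64] =
{
    32,  0,  1, 12,  2,  6,  0, 13,   3,  0,  7,  0,  0,  0,  0, 14,
    10,  4,  0,  0,  8,  0,  0, 25,   0,  0,  0,  0,  0, 21, 27, 15,
    31, 11,  5,  0,  0,  0,  0,  0,   9,  0,  0, 24,  0,  0, 20, 26,
    30,  0,  0,  0,  0, 23,  0, 19,  29,  0, 22, 18, 28, 17, 16,  0,
};

static int ffs_bit(uint32_t val)
{
    return ffs_table[((val & -val)*0x0450fbaf) >> 26];
}

int main(void)
{
    assert(ffs_bit(0x00000001) == 0);  /* bit 0 set */
    assert(ffs_bit(0x00000028) == 3);  /* lowest of bits 3 and 5 */
    assert(ffs_bit(0x80000000) == 31); /* only the top bit set */
    assert(ffs_bit(0) == 32);          /* no bits set */
    return 0;
}
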
diff --git a/firmware/drivers/ata.c b/firmware/drivers/ata.c
index e067235..56b303d 100644
--- a/firmware/drivers/ata.c
+++ b/firmware/drivers/ata.c
@@ -95,52 +95,6 @@ static unsigned short identify_info[SECTOR_SIZE/2];
#ifdef MAX_PHYS_SECTOR_SIZE
-/** This is temporary **/
-/* Define the mutex functions to use the special hack object */
-#define mutex_init ata_spin_init
-#define mutex_lock ata_spin_lock
-#define mutex_unlock ata_spin_unlock
-
-void ata_spin_init(struct mutex *m)
-{
- m->thread = NULL;
- m->locked = 0;
- m->count = 0;
-#if CONFIG_CORELOCK == SW_CORELOCK
- corelock_init(&m->cl);
-#endif
-}
-
-void ata_spin_lock(struct mutex *m)
-{
- struct thread_entry *current = thread_get_current();
-
- if (current == m->thread)
- {
- m->count++;
- return;
- }
-
- while (test_and_set(&m->locked, 1, &m->cl))
- yield();
-
- m->thread = current;
-}
-
-void ata_spin_unlock(struct mutex *m)
-{
- if (m->count > 0)
- {
- m->count--;
- return;
- }
-
- m->thread = NULL;
- test_and_set(&m->locked, 0, &m->cl);
-}
-
-/****/
-
struct sector_cache_entry {
bool inuse;
unsigned long sectornum; /* logical sector */
@@ -163,7 +117,7 @@ STATICIRAM int wait_for_bsy(void)
long timeout = current_tick + HZ*30;
while (TIME_BEFORE(current_tick, timeout) && (ATA_STATUS & STATUS_BSY)) {
last_disk_activity = current_tick;
- priority_yield();
+ yield();
}
if (TIME_BEFORE(current_tick, timeout))
@@ -185,7 +139,7 @@ STATICIRAM int wait_for_rdy(void)
while (TIME_BEFORE(current_tick, timeout) &&
!(ATA_ALT_STATUS & STATUS_RDY)) {
last_disk_activity = current_tick;
- priority_yield();
+ yield();
}
if (TIME_BEFORE(current_tick, timeout))
diff --git a/firmware/drivers/fat.c b/firmware/drivers/fat.c
index 8ae3b70..a538b92 100644
--- a/firmware/drivers/fat.c
+++ b/firmware/drivers/fat.c
@@ -259,6 +259,12 @@ void fat_init(void)
mutex_init(&cache_mutex);
}
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Disable this because it is dangerous due to the assumption that
+ * mutex_unlock won't yield */
+ mutex_set_preempt(&cache_mutex, false);
+#endif
+
/* mark the FAT cache as unused */
for(i = 0;i < FAT_CACHE_SIZE;i++)
{
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 6a04504..1a288dd 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -371,10 +371,20 @@
#endif
/* define for all cpus from ARM family */
+#if (CONFIG_CPU == IMX31L)
+#define CPU_ARM
+#define ARM_ARCH 6 /* ARMv6 */
+#endif
+
+#if defined(CPU_TCC77X) || defined(CPU_TCC780X)
+#define CPU_ARM
+#define ARM_ARCH 5 /* ARMv5 */
+#endif
+
#if defined(CPU_PP) || (CONFIG_CPU == PNX0101) || (CONFIG_CPU == S3C2440) \
- || (CONFIG_CPU == DSC25) || (CONFIG_CPU == IMX31L) || (CONFIG_CPU == DM320) \
- || defined(CPU_TCC77X) || defined(CPU_TCC780X)
+ || (CONFIG_CPU == DSC25) || (CONFIG_CPU == DM320)
#define CPU_ARM
+#define ARM_ARCH 4 /* ARMv4 */
#endif
/* Determine if accesses should be strictly long aligned. */
diff --git a/firmware/export/kernel.h b/firmware/export/kernel.h
index 70a2f98..78403c8 100644
--- a/firmware/export/kernel.h
+++ b/firmware/export/kernel.h
@@ -76,6 +76,8 @@
#define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)
+#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
+
struct queue_event
{
long id;
@@ -87,68 +89,92 @@ struct queue_sender_list
{
/* If non-NULL, there is a thread waiting for the corresponding event */
/* Must be statically allocated to put in non-cached ram. */
- struct thread_entry *senders[QUEUE_LENGTH];
+ struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
+ struct thread_entry *list; /* list of senders in map */
/* Send info for last message dequeued or NULL if replied or not sent */
struct thread_entry *curr_sender;
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct blocker blocker;
+#endif
};
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define QUEUE_GET_THREAD(q) \
+ (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
+#else
+/* Queues without priority enabled have no owner provision _at this time_ */
+#define QUEUE_GET_THREAD(q) \
+ (NULL)
+#endif
+
struct event_queue
{
- struct thread_queue queue; /* Waiter list */
+ struct thread_entry *queue; /* waiter list */
struct queue_event events[QUEUE_LENGTH]; /* list of events */
- unsigned int read; /* head of queue */
- unsigned int write; /* tail of queue */
+ unsigned int read; /* head of queue */
+ unsigned int write; /* tail of queue */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- struct queue_sender_list *send; /* list of threads waiting for
- reply to an event */
+ struct queue_sender_list *send; /* list of threads waiting for
+ reply to an event */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct blocker *blocker_p; /* priority inheritance info
+ for sync message senders */
#endif
-#if NUM_CORES > 1
- struct corelock cl; /* inter-core sync */
#endif
+ IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define MUTEX_SET_THREAD(m, t) ((m)->blocker.thread = (t))
+#define MUTEX_GET_THREAD(m) ((m)->blocker.thread)
+#else
+#define MUTEX_SET_THREAD(m, t) ((m)->thread = (t))
+#define MUTEX_GET_THREAD(m) ((m)->thread)
+#endif
+
struct mutex
{
- struct thread_entry *queue; /* Waiter list */
-#if CONFIG_CORELOCK == SW_CORELOCK
- struct corelock cl; /* inter-core sync */
+ struct thread_entry *queue; /* waiter list */
+ int count; /* lock owner recursion count */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct blocker blocker; /* priority inheritance info
+ for waiters */
+ bool no_preempt; /* don't allow higher-priority thread
+ to be scheduled even if woken */
+#else
+ struct thread_entry *thread;
#endif
- struct thread_entry *thread; /* thread that owns lock */
- int count; /* lock owner recursion count */
- unsigned char locked; /* locked semaphore */
+ IF_COP( struct corelock cl; ) /* multiprocessor sync */
+ unsigned char locked; /* locked semaphore */
};
#if NUM_CORES > 1
struct spinlock
{
- struct corelock cl; /* inter-core sync */
- struct thread_entry *thread; /* lock owner */
- int count; /* lock owner recursion count */
+ struct thread_entry *thread; /* lock owner */
+ int count; /* lock owner recursion count */
+ struct corelock cl; /* multiprocessor sync */
};
#endif
#ifdef HAVE_SEMAPHORE_OBJECTS
struct semaphore
{
- struct thread_entry *queue; /* Waiter list */
-#if CONFIG_CORELOCK == SW_CORELOCK
- struct corelock cl; /* inter-core sync */
-#endif
- int count; /* # of waits remaining before unsignaled */
- int max; /* maximum # of waits to remain signaled */
+ struct thread_entry *queue; /* Waiter list */
+ int count; /* # of waits remaining before unsignaled */
+ int max; /* maximum # of waits to remain signaled */
+ IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#endif
#ifdef HAVE_EVENT_OBJECTS
struct event
{
- struct thread_entry *queues[2]; /* waiters for each state */
-#if CONFIG_CORELOCK == SW_CORELOCK
- struct corelock cl; /* inter-core sync */
-#endif
- unsigned char automatic; /* event performs auto-reset */
- unsigned char state; /* state: 1 = signaled */
+ struct thread_entry *queues[2]; /* waiters for each state */
+ unsigned char automatic; /* event performs auto-reset */
+ unsigned char state; /* state: 1 = signaled */
+ IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#endif
@@ -208,7 +234,9 @@ extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
int ticks);
extern void queue_post(struct event_queue *q, long id, intptr_t data);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
+extern void queue_enable_queue_send(struct event_queue *q,
+ struct queue_sender_list *send,
+ struct thread_entry *owner);
extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
extern void queue_reply(struct event_queue *q, intptr_t retval);
extern bool queue_in_queue_send(struct event_queue *q);
@@ -223,6 +251,11 @@ extern int queue_broadcast(long id, intptr_t data);
extern void mutex_init(struct mutex *m);
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
+#ifdef HAVE_PRIORITY_SCHEDULING
+/* Temporary function to disable mutex preempting a thread on unlock */
+static inline void mutex_set_preempt(struct mutex *m, bool preempt)
+ { m->no_preempt = !preempt; }
+#endif
#if NUM_CORES > 1
extern void spinlock_init(struct spinlock *l);
extern void spinlock_lock(struct spinlock *l);
@@ -240,6 +273,5 @@ extern void event_init(struct event *e, unsigned int flags);
extern void event_wait(struct event *e, unsigned int for_state);
extern void event_set_state(struct event *e, unsigned int state);
#endif /* HAVE_EVENT_OBJECTS */
-#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
#endif /* _KERNEL_H_ */
diff --git a/firmware/export/system.h b/firmware/export/system.h
index b973b57..dc38532 100644
--- a/firmware/export/system.h
+++ b/firmware/export/system.h
@@ -159,6 +159,20 @@ int get_cpu_boost_counter(void);
#define H_TO_BE32(x) (x)
#endif
+/* Get the byte offset of a type's member */
+#define OFFSETOF(type, membername) ((off_t)&((type *)0)->membername)
+
+/* Get the type pointer from one of its members */
+#define TYPE_FROM_MEMBER(type, memberptr, membername) \
+ ((type *)((intptr_t)(memberptr) - OFFSETOF(type, membername)))
+
+/* returns index of first set bit + 1 or 0 if no bits are set */
+int find_first_set_bit(uint32_t val);
+
+static inline __attribute__((always_inline))
+uint32_t isolate_first_bit(uint32_t val)
+ { return val & -val; }
+
/* gcc 3.4 changed the format of the constraints */
#if (__GNUC__ >= 3) && (__GNUC_MINOR__ > 3) || (__GNUC__ >= 4)
#define I_CONSTRAINT "I08"
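
TYPE_FROM_MEMBER() added above is the familiar container_of idiom: given a pointer to a member, step back by that member's offset to recover the enclosing struct. A minimal sketch; struct widget is made up for illustration, and the macro bodies are copied from the hunk above:

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#define OFFSETOF(type, membername) ((off_t)&((type *)0)->membername)
#define TYPE_FROM_MEMBER(type, memberptr, membername) \
    ((type *)((intptr_t)(memberptr) - OFFSETOF(type, membername)))

struct widget
{
    int id;
    long expires;
};

int main(void)
{
    struct widget w = { 42, 0 };
    long *member = &w.expires;

    /* Recover the enclosing struct from a pointer to its member */
    struct widget *wp = TYPE_FROM_MEMBER(struct widget, member, expires);
    printf("%d\n", wp->id); /* prints 42 */
    return 0;
}
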
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index dd97ab1..bb1cb7c 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -26,21 +26,35 @@
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
* by giving high priority threads more CPU time than lower priority threads
- * when they need it.
- *
+ * when they need it. Priority is differential such that the priority
+ * difference between a lower priority runnable thread and the highest priority
+ * runnable thread determines the amount of aging nescessary for the lower
+ * priority thread to be scheduled in order to prevent starvation.
+ *
* If software playback codec pcm buffer is going down to critical, codec
- * can change it own priority to REALTIME to override user interface and
+ * can gradually raise its own priority to override user interface and
* prevent playback skipping.
*/
+#define PRIORITY_RESERVED_HIGH 0 /* Reserved */
+#define PRIORITY_RESERVED_LOW 32 /* Reserved */
#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
-#define LOWEST_PRIORITY 100 /* The lowest possible thread priority */
-#define PRIORITY_REALTIME 1
-#define PRIORITY_USER_INTERFACE 4 /* The main thread */
-#define PRIORITY_RECORDING 4 /* Recording thread */
-#define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */
-#define PRIORITY_BUFFERING 4 /* Codec buffering thread */
-#define PRIORITY_SYSTEM 6 /* All other firmware threads */
-#define PRIORITY_BACKGROUND 8 /* Normal application threads */
+#define LOWEST_PRIORITY 31 /* The lowest possible thread priority */
+/* Realtime range reserved for threads that will not allow threads of lower
+ * priority to age and run (future expansion) */
+#define PRIORITY_REALTIME_1 1
+#define PRIORITY_REALTIME_2 2
+#define PRIORITY_REALTIME_3 3
+#define PRIORITY_REALTIME_4 4
+#define PRIORITY_REALTIME 4 /* Lowest realtime range */
+#define PRIORITY_USER_INTERFACE 16 /* The main thread */
+#define PRIORITY_RECORDING 16 /* Recording thread */
+#define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */
+#define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */
+#define PRIORITY_BUFFERING 16 /* Codec buffering thread */
+#define PRIORITY_SYSTEM 18 /* All other firmware threads */
+#define PRIORITY_BACKGROUND 20 /* Normal application threads */
+#define NUM_PRIORITIES 32
+#define PRIORITY_IDLE 32 /* Priority representative of no tasks */
/* TODO: Only a minor tweak to create_thread would be needed to let
* thread slots be caller allocated - no essential threading functionality
@@ -59,80 +73,40 @@
#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
-/**
- * "Busy" values that can be swapped into a variable to indicate
- * that the variable or object pointed to is in use by another processor
- * core. When accessed, the busy value is swapped-in while the current
- * value is atomically returned. If the swap returns the busy value,
- * the processor should retry the operation until some other value is
- * returned. When modification is finished, the new value should be
- * written which unlocks it and updates it atomically.
- *
- * Procedure:
- * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
- *
- * Modify/examine object at mem location or variable. Create "new_value"
- * as suitable.
- *
- * variable = new_value or curr_value;
- *
- * To check a value for busy and perform an operation if not:
- * curr_value = swap(&variable, BUSY_VALUE);
- *
- * if (curr_value != BUSY_VALUE)
- * {
- * Modify/examine object at mem location or variable. Create "new_value"
- * as suitable.
- * variable = new_value or curr_value;
- * }
- * else
- * {
- * Do nothing - already busy
- * }
- *
- * Only ever restore when an actual value is returned or else it could leave
- * the variable locked permanently if another processor unlocked in the
- * meantime. The next access attempt would deadlock for all processors since
- * an abandoned busy status would be left behind.
- */
-#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
-#define STATE_BUSYu8 UINT8_MAX
-#define STATE_BUSYi INT_MIN
-
#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
* needs them. */
#ifdef CPU_COLDFIRE
struct regs
{
- unsigned int macsr; /* 0 - EMAC status register */
- unsigned int d[6]; /* 4-24 - d2-d7 */
- unsigned int a[5]; /* 28-44 - a2-a6 */
- void *sp; /* 48 - Stack pointer (a7) */
- void *start; /* 52 - Thread start address, or NULL when started */
+ uint32_t macsr; /* 0 - EMAC status register */
+ uint32_t d[6]; /* 4-24 - d2-d7 */
+ uint32_t a[5]; /* 28-44 - a2-a6 */
+ uint32_t sp; /* 48 - Stack pointer (a7) */
+ uint32_t start; /* 52 - Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
- unsigned int r[7]; /* 0-24 - Registers r8 thru r14 */
- void *sp; /* 28 - Stack pointer (r15) */
- void *pr; /* 32 - Procedure register */
- void *start; /* 36 - Thread start address, or NULL when started */
+ uint32_t r[7]; /* 0-24 - Registers r8 thru r14 */
+ uint32_t sp; /* 28 - Stack pointer (r15) */
+ uint32_t pr; /* 32 - Procedure register */
+ uint32_t start; /* 36 - Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
- unsigned int r[8]; /* 0-28 - Registers r4-r11 */
- void *sp; /* 32 - Stack pointer (r13) */
- unsigned int lr; /* 36 - r14 (lr) */
- void *start; /* 40 - Thread start address, or NULL when started */
+ uint32_t r[8]; /* 0-28 - Registers r4-r11 */
+ uint32_t sp; /* 32 - Stack pointer (r13) */
+ uint32_t lr; /* 36 - r14 (lr) */
+ uint32_t start; /* 40 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
#else
struct regs
{
void *t; /* Simulator OS thread */
- void *c; /* Condition for blocking and sync */
+ void *s; /* Semaphore for blocking and wakeup */
void (*start)(void); /* Start function */
};
#endif /* !SIMULATOR */
@@ -154,13 +128,13 @@ enum
thread_thaw is called with its ID */
THREAD_NUM_STATES,
TIMEOUT_STATE_FIRST = STATE_SLEEPING,
-#if NUM_CORES > 1
- STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
-#endif
};
#if NUM_CORES > 1
-#define THREAD_DESTRUCT ((const char *)0x84905617)
+/* Pointer value for name field to indicate thread is being killed. Using
+ * an alternate STATE_* won't work since that would interfere with operation
+ * while the thread is still running. */
+#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif
/* Link information for lists thread is in */
@@ -188,7 +162,7 @@ void corelock_unlock(struct corelock *cl);
/* Use native atomic swap/exchange instruction */
struct corelock
{
- unsigned char locked;
+ volatile unsigned char locked;
} __attribute__((packed));
#define corelock_init(cl) \
@@ -207,15 +181,36 @@ struct corelock
#define corelock_unlock(cl)
#endif /* core locking selection */
-struct thread_queue
+#ifdef HAVE_PRIORITY_SCHEDULING
+struct blocker
{
- struct thread_entry *queue; /* list of threads waiting -
- _must_ be first member */
-#if CONFIG_CORELOCK == SW_CORELOCK
- struct corelock cl; /* lock for atomic list operations */
-#endif
+ struct thread_entry *thread; /* thread blocking other threads
+ (aka. object owner) */
+ int priority; /* highest priority waiter */
+ struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
+};
+
+/* Choices of wakeup protocol */
+
+/* For transfer of object ownership by one thread to another thread by
+ * the owning thread itself (mutexes) */
+struct thread_entry *
+ wakeup_priority_protocol_transfer(struct thread_entry *thread);
+
+/* For release by owner where ownership doesn't change - other threads,
+ * interrupts, timeouts, etc. (mutex timeout, queues) */
+struct thread_entry *
+ wakeup_priority_protocol_release(struct thread_entry *thread);
+
+
+struct priority_distribution
+{
+ uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
+ uint32_t mask; /* Bitmask of hist entries that are not zero */
};
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
/* Information kept in each thread slot
* members are arranged according to size - largest first - in order
* to ensure both alignment and packing at the same time.
@@ -224,88 +219,83 @@ struct thread_entry
{
struct regs context; /* Register context at switch -
_must_ be first member */
- void *stack; /* Pointer to top of stack */
+ uintptr_t *stack; /* Pointer to top of stack */
const char *name; /* Thread name */
long tmo_tick; /* Tick when thread should be woken from
- timeout */
+ timeout -
+ states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
struct thread_list l; /* Links for blocked/waking/running -
circular linkage in both directions */
struct thread_list tmo; /* Links for timeout list -
- Self-pointer-terminated in reverse direction,
- NULL-terminated in forward direction */
- struct thread_queue *bqp; /* Pointer to list variable in kernel
+ Circular in reverse direction, NULL-terminated in
+ forward direction -
+ states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
+ struct thread_entry **bqp; /* Pointer to list variable in kernel
object where thread is blocked - used
- for implicit unblock and explicit wake */
-#if CONFIG_CORELOCK == SW_CORELOCK
- struct thread_entry **bqnlp; /* Pointer to list variable in kernel
- object where thread is blocked - non-locked
- operations will be used */
+ for implicit unblock and explicit wake
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+#if NUM_CORES > 1
+ struct corelock *obj_cl; /* Object corelock where thread is blocked -
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#endif
struct thread_entry *queue; /* List of threads waiting for thread to be
removed */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- intptr_t retval; /* Return value from a blocked operation */
+ #define HAVE_WAKEUP_EXT_CB
+ void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
+ performs special steps needed when being
+ forced off of an object's wait queue that
+ go beyond the standard wait queue removal
+ and priority disinheritance */
+ /* Only enabled when using queue_send for now */
+#endif
+#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
+ intptr_t retval; /* Return value from a blocked operation/
+ misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
- long last_run; /* Last tick when started */
+ /* Priority summary of owned objects that support inheritance */
+ struct blocker *blocker; /* Pointer to blocker when this thread is blocked
+ on an object that supports PIP -
+ states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
+ struct priority_distribution pdist; /* Priority summary of owned objects
+ that have blocked threads and thread's own
+ base priority */
+ int skip_count; /* Number of times skipped if higher priority
+ thread was running */
#endif
unsigned short stack_size; /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
- unsigned char priority; /* Current priority */
- unsigned char priority_x; /* Inherited priority - right now just a
- runtime guarantee flag */
+ unsigned char base_priority; /* Base priority (set explicitly during
+ creation or thread_set_priority) */
+ unsigned char priority; /* Scheduled priority (higher of base or
+ all threads blocked by this one) */
#endif
unsigned char state; /* Thread slot state (STATE_*) */
-#if NUM_CORES > 1
- unsigned char core; /* The core to which thread belongs */
-#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
- unsigned char boosted; /* CPU frequency boost flag */
+ unsigned char cpu_boost; /* CPU frequency boost flag */
#endif
-#if CONFIG_CORELOCK == SW_CORELOCK
- struct corelock cl; /* Corelock to lock thread slot */
+#if NUM_CORES > 1
+ unsigned char core; /* The core to which thread belongs */
+ struct corelock waiter_cl; /* Corelock for thread_wait */
+ struct corelock slot_cl; /* Corelock to lock thread slot */
#endif
};
#if NUM_CORES > 1
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
-#define TBOP_UNLOCK_LIST 0x01 /* Set a pointer variable address var_ptrp */
-#if CONFIG_CORELOCK == CORELOCK_SWAP
-#define TBOP_SET_VARi 0x02 /* Set an int at address var_ip */
-#define TBOP_SET_VARu8 0x03 /* Set an unsigned char at address var_u8p */
-#define TBOP_VAR_TYPE_MASK 0x03 /* Mask for variable type*/
-#endif /* CONFIG_CORELOCK */
-#define TBOP_UNLOCK_CORELOCK 0x04
-#define TBOP_UNLOCK_THREAD 0x08 /* Unlock a thread's slot */
-#define TBOP_UNLOCK_CURRENT 0x10 /* Unlock the current thread's slot */
-#define TBOP_SWITCH_CORE 0x20 /* Call the core switch preparation routine */
+enum
+{
+ TBOP_CLEAR = 0, /* No operation to do */
+ TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
+ TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
+};
struct thread_blk_ops
{
-#if CONFIG_CORELOCK != SW_CORELOCK
- union
- {
- int var_iv; /* int variable value to set */
- uint8_t var_u8v; /* unsigned char valur to set */
- struct thread_entry *list_v; /* list pointer queue value to set */
- };
-#endif
- union
- {
-#if CONFIG_CORELOCK != SW_CORELOCK
- int *var_ip; /* pointer to int variable */
- uint8_t *var_u8p; /* pointer to unsigned char varuable */
-#endif
- struct thread_queue *list_p; /* pointer to list variable */
- };
-#if CONFIG_CORELOCK == SW_CORELOCK
- struct corelock *cl_p; /* corelock to unlock */
- struct thread_entry *thread; /* thread to unlock */
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- unsigned char state; /* new thread state (performs unlock) */
-#endif /* SOFTWARE_CORELOCK */
- unsigned char flags; /* TBOP_* flags */
+ struct corelock *cl_p; /* pointer to corelock */
+ unsigned char flags; /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
@@ -316,28 +306,30 @@ struct core_entry
{
/* "Active" lists - core is constantly active on these and are never
locked and interrupts do not access them */
- struct thread_entry *running; /* threads that are running */
+ struct thread_entry *running; /* threads that are running (RTR) */
struct thread_entry *timeout; /* threads that are on a timeout before
running again */
- /* "Shared" lists - cores interact in a synchronized manner - access
- is locked between cores and interrupts */
- struct thread_queue waking; /* intermediate locked list that
- hold threads other core should wake up
- on next task switch */
+ struct thread_entry *block_task; /* Task going off running list */
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct priority_distribution rtr; /* Summary of running and ready-to-run
+ threads */
+#endif
long next_tmo_check; /* soonest time to check tmo threads */
#if NUM_CORES > 1
struct thread_blk_ops blk_ops; /* operations to perform when
blocking a thread */
-#endif /* NUM_CORES */
#ifdef HAVE_PRIORITY_SCHEDULING
- unsigned char highest_priority;
+ struct corelock rtr_cl; /* Lock for rtr list */
#endif
+#endif /* NUM_CORES */
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
+#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
+#define IFN_PRIO(...) __VA_ARGS__
#endif
/* Macros generate better code than an inline function in this case */
@@ -464,13 +456,18 @@ struct core_entry
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));
+/* Initialize the scheduler */
+void init_threads(void);
+
+/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
struct thread_entry*
- create_thread(void (*function)(void), void* stack, int stack_size,
+ create_thread(void (*function)(void), void* stack, size_t stack_size,
unsigned flags, const char *name
IF_PRIO(, int priority)
IF_COP(, unsigned int core));
+/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
@@ -478,86 +475,52 @@ void cancel_cpu_boost(void);
#define trigger_cpu_boost()
#define cancel_cpu_boost()
#endif
+/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
+ * Has no effect on a thread not frozen. */
void thread_thaw(struct thread_entry *thread);
+/* Wait for a thread to exit */
void thread_wait(struct thread_entry *thread);
+/* Exit the current thread */
+void thread_exit(void);
+#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
+#define ALLOW_REMOVE_THREAD
+/* Remove a thread from the scheduler */
void remove_thread(struct thread_entry *thread);
-void switch_thread(struct thread_entry *old);
-void sleep_thread(int ticks);
+#endif
-/**
- * Setup to allow using thread queues as locked or non-locked without speed
- * sacrifices in both core locking types.
- *
- * The blocking/waking function inline two different version of the real
- * function into the stubs when a software or other separate core locking
- * mechanism is employed.
- *
- * When a simple test-and-set or similar instruction is available, locking
- * has no cost and so one version is used and the internal worker is called
- * directly.
- *
- * CORELOCK_NONE is treated the same as when an atomic instruction can be
- * used.
- */
+/* Switch to next runnable thread */
+void switch_thread(void);
+/* Blocks a thread for at least the specified number of ticks (0 = wait until
+ * next tick) */
+void sleep_thread(int ticks);
+/* Indefinitely blocks the current thread on a thread queue */
+void block_thread(struct thread_entry *current);
+/* Blocks the current thread on a thread queue until explicitly woken or
+ * the timeout is reached */
+void block_thread_w_tmo(struct thread_entry *current, int timeout);
+
+/* Return bit flags for thread wakeup */
+#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
+#define THREAD_OK 0x1 /* A thread was woken up */
+#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
+ higher priority than current were woken) */
-/* Blocks the current thread on a thread queue */
-#if CONFIG_CORELOCK == SW_CORELOCK
-void block_thread(struct thread_queue *tq);
-void block_thread_no_listlock(struct thread_entry **list);
-#else
-void _block_thread(struct thread_queue *tq);
-static inline void block_thread(struct thread_queue *tq)
- { _block_thread(tq); }
-static inline void block_thread_no_listlock(struct thread_entry **list)
- { _block_thread((struct thread_queue *)list); }
-#endif /* CONFIG_CORELOCK */
-
-/* Blocks the current thread on a thread queue for a max amount of time
- * There is no "_no_listlock" version because timeout blocks without sync on
- * the blocking queues is not permitted since either core could access the
- * list at any time to do an implicit wake. In other words, objects with
- * timeout support require lockable queues. */
-void block_thread_w_tmo(struct thread_queue *tq, int timeout);
-
-/* Wakes up the thread at the head of the queue */
-#define THREAD_WAKEUP_NONE ((struct thread_entry *)NULL)
-#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
-#if CONFIG_CORELOCK == SW_CORELOCK
-struct thread_entry * wakeup_thread(struct thread_queue *tq);
-struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
-#else
-struct thread_entry * _wakeup_thread(struct thread_queue *list);
-static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
- { return _wakeup_thread(tq); }
-static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
- { return _wakeup_thread((struct thread_queue *)list); }
-#endif /* CONFIG_CORELOCK */
-
-/* Initialize a thread_queue object. */
-static inline void thread_queue_init(struct thread_queue *tq)
- { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
/* A convenience function for waking an entire queue of threads. */
-static inline void thread_queue_wake(struct thread_queue *tq)
- { while (wakeup_thread(tq) != NULL); }
-/* The no-listlock version of thread_queue_wake() */
-static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
- { while (wakeup_thread_no_listlock(list) != NULL); }
+unsigned int thread_queue_wake(struct thread_entry **list);
+
+/* Wakeup a thread at the head of a list */
+unsigned int wakeup_thread(struct thread_entry **list);
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
-/* Yield that guarantees thread execution once per round regardless of
- thread's scheduler priority - basically a transient realtime boost
- without altering the scheduler's thread precedence. */
-void priority_yield(void);
-#else
-#define priority_yield yield
#endif /* HAVE_PRIORITY_SCHEDULING */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
struct thread_entry * thread_get_current(void);
-void init_threads(void);
+
+/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
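
The priority_distribution pair introduced above (a per-priority histogram plus a bitmask of non-empty levels) lets the scheduler find the highest effective priority in constant time with find_first_set_bit(). A sketch of how such a structure can be maintained, under the assumption that the scheduler bumps the histogram as threads become ready and drops it as they block; prio_add()/prio_remove() are illustrative names, and lowest_set_bit() is a portable loop standing in for the ffs routines added by this commit:

#include <stdint.h>
#include <stdio.h>

#define NUM_PRIORITIES 32

struct priority_distribution
{
    uint8_t  hist[NUM_PRIORITIES]; /* count of entries per priority */
    uint32_t mask;                 /* bit n set <=> hist[n] != 0 */
};

/* Stand-in for find_first_set_bit() from ffs.c */
static int lowest_set_bit(uint32_t mask)
{
    int bit = 0;
    if (mask == 0)
        return 32;
    while (!(mask & (1u << bit)))
        bit++;
    return bit;
}

static void prio_add(struct priority_distribution *pd, int priority)
{
    if (pd->hist[priority]++ == 0)
        pd->mask |= 1u << priority;    /* first entry at this level */
}

static void prio_remove(struct priority_distribution *pd, int priority)
{
    if (--pd->hist[priority] == 0)
        pd->mask &= ~(1u << priority); /* last entry at this level */
}

int main(void)
{
    struct priority_distribution pd = { {0}, 0 };
    prio_add(&pd, 16);                       /* e.g. a playback thread */
    prio_add(&pd, 18);                       /* e.g. a system thread */
    printf("%d\n", lowest_set_bit(pd.mask)); /* 16 - lowest number wins */
    prio_remove(&pd, 16);
    printf("%d\n", lowest_set_bit(pd.mask)); /* 18 */
    return 0;
}
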
diff --git a/firmware/kernel.c b/firmware/kernel.c
index 835181f..47c0d58 100644
--- a/firmware/kernel.c
+++ b/firmware/kernel.c
@@ -20,21 +20,30 @@
#include <string.h>
#include "config.h"
#include "kernel.h"
+#ifdef SIMULATOR
+#include "system-sdl.h"
+#include "debug.h"
+#endif
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
/* Make this nonzero to enable more elaborate checks on objects */
-#ifdef DEBUG
-#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
+#if defined(DEBUG) || defined(SIMULATOR)
+#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif
#if KERNEL_OBJECT_CHECKS
+#ifdef SIMULATOR
+#define KERNEL_ASSERT(exp, msg...) \
+ ({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
+#else
#define KERNEL_ASSERT(exp, msg...) \
({ if (!({ exp; })) panicf(msg); })
+#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
@@ -52,9 +61,7 @@ static struct
{
int count;
struct event_queue *queues[MAX_NUM_QUEUES];
-#if NUM_CORES > 1
- struct corelock cl;
-#endif
+ IF_COP( struct corelock cl; )
} all_queues NOCACHEBSS_ATTR;
/****************************************************************************
@@ -77,6 +84,334 @@ void kernel_init(void)
}
}
+/****************************************************************************
+ * Timer tick
+ ****************************************************************************/
+#if CONFIG_CPU == SH7034
+void tick_start(unsigned int interval_in_ms)
+{
+ unsigned long count;
+
+ count = CPU_FREQ * interval_in_ms / 1000 / 8;
+
+ if(count > 0x10000)
+ {
+ panicf("Error! The tick interval is too long (%d ms)\n",
+ interval_in_ms);
+ return;
+ }
+
+ /* We are using timer 0 */
+
+ TSTR &= ~0x01; /* Stop the timer */
+ TSNC &= ~0x01; /* No synchronization */
+ TMDR &= ~0x01; /* Operate normally */
+
+ TCNT0 = 0; /* Start counting at 0 */
+ GRA0 = (unsigned short)(count - 1);
+ TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */
+
+ /* Enable interrupt on level 1 */
+ IPRC = (IPRC & ~0x00f0) | 0x0010;
+
+ TSR0 &= ~0x01;
+ TIER0 = 0xf9; /* Enable GRA match interrupt */
+
+ TSTR |= 0x01; /* Start timer 1 */
+}
+
+void IMIA0(void) __attribute__ ((interrupt_handler));
+void IMIA0(void)
+{
+ int i;
+
+ /* Run through the list of tick tasks */
+ for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
+ {
+ if(tick_funcs[i])
+ {
+ tick_funcs[i]();
+ }
+ }
+
+ current_tick++;
+
+ TSR0 &= ~0x01;
+}
+#elif defined(CPU_COLDFIRE)
+void tick_start(unsigned int interval_in_ms)
+{
+ unsigned long count;
+ int prescale;
+
+ count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;
+
+ if(count > 0x10000)
+ {
+ panicf("Error! The tick interval is too long (%d ms)\n",
+ interval_in_ms);
+ return;
+ }
+
+ prescale = cpu_frequency / CPU_FREQ;
+ /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
+ changes within timer.c */
+
+ /* We are using timer 0 */
+
+ TRR0 = (unsigned short)(count - 1); /* The reference count */
+ TCN0 = 0; /* reset the timer */
+ TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
+ /* restart, CLK/16, enabled, prescaler */
+
+ TER0 = 0xff; /* Clear all events */
+
+ ICR1 = 0x8c; /* Interrupt on level 3.0 */
+ IMR &= ~0x200;
+}
+
+void TIMER0(void) __attribute__ ((interrupt_handler));
+void TIMER0(void)
+{
+ int i;
+
+ /* Run through the list of tick tasks */
+ for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
+ {
+ if(tick_funcs[i])
+ {
+ tick_funcs[i]();
+ }
+ }
+
+ current_tick++;
+
+ TER0 = 0xff; /* Clear all events */
+}
+
+#elif defined(CPU_PP)
+
+#ifndef BOOTLOADER
+void TIMER1(void)
+{
+ int i;
+
+ /* Run through the list of tick tasks (using main core) */
+ TIMER1_VAL; /* Read value to ack IRQ */
+
+ /* Run through the list of tick tasks using main CPU core -
+ wake up the COP through its control interface to provide pulse */
+ for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
+ {
+ if (tick_funcs[i])
+ {
+ tick_funcs[i]();
+ }
+ }
+
+#if NUM_CORES > 1
+ /* Pulse the COP */
+ core_wake(COP);
+#endif /* NUM_CORES */
+
+ current_tick++;
+}
+#endif
+
+/* Must be the last function called in kernel/thread initialization */
+void tick_start(unsigned int interval_in_ms)
+{
+#ifndef BOOTLOADER
+ TIMER1_CFG = 0x0;
+ TIMER1_VAL;
+ /* enable timer */
+ TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
+ /* unmask interrupt source */
+ CPU_INT_EN = TIMER1_MASK;
+#else
+ /* We don't enable interrupts in the bootloader */
+ (void)interval_in_ms;
+#endif
+}
+
+#elif CONFIG_CPU == PNX0101
+
+void timer_handler(void)
+{
+ int i;
+
+ /* Run through the list of tick tasks */
+ for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
+ {
+ if(tick_funcs[i])
+ tick_funcs[i]();
+ }
+
+ current_tick++;
+
+ TIMER0.clr = 0;
+}
+
+void tick_start(unsigned int interval_in_ms)
+{
+ TIMER0.ctrl &= ~0x80; /* Disable the counter */
+ TIMER0.ctrl |= 0x40; /* Reload after counting down to zero */
+ TIMER0.load = 3000000 * interval_in_ms / 1000;
+ TIMER0.ctrl &= ~0xc; /* No prescaler */
+ TIMER0.clr = 1; /* Clear the interrupt request */
+
+ irq_set_int_handler(IRQ_TIMER0, timer_handler);
+ irq_enable_int(IRQ_TIMER0);
+
+ TIMER0.ctrl |= 0x80; /* Enable the counter */
+}
+#endif
+
+int tick_add_task(void (*f)(void))
+{
+ int i;
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ /* Add a task if there is room */
+ for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
+ {
+ if(tick_funcs[i] == NULL)
+ {
+ tick_funcs[i] = f;
+ set_irq_level(oldlevel);
+ return 0;
+ }
+ }
+ set_irq_level(oldlevel);
+ panicf("Error! tick_add_task(): out of tasks");
+ return -1;
+}
+
+int tick_remove_task(void (*f)(void))
+{
+ int i;
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ /* Remove a task if it is there */
+ for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
+ {
+ if(tick_funcs[i] == f)
+ {
+ tick_funcs[i] = NULL;
+ set_irq_level(oldlevel);
+ return 0;
+ }
+ }
+
+ set_irq_level(oldlevel);
+ return -1;
+}
+
+/****************************************************************************
+ * Tick-based interval timers/one-shots - be mindful this is not really
+ * intended for continuous timers but for events that need to run for a short
+ * time and be cancelled without further software intervention.
+ ****************************************************************************/
+#ifdef INCLUDE_TIMEOUT_API
+static struct timeout *tmo_list = NULL; /* list of active timeout events */
+
+/* timeout tick task - calls event handlers when they expire
+ * Event handlers may alter ticks, callback and data during operation.
+ */
+static void timeout_tick(void)
+{
+ unsigned long tick = current_tick;
+ struct timeout *curr, *next;
+
+ for (curr = tmo_list; curr != NULL; curr = next)
+ {
+ next = (struct timeout *)curr->next;
+
+ if (TIME_BEFORE(tick, curr->expires))
+ continue;
+
+ /* this event has expired - call callback */
+ if (curr->callback(curr))
+ *(long *)&curr->expires = tick + curr->ticks; /* reload */
+ else
+ timeout_cancel(curr); /* cancel */
+ }
+}
+
+/* Cancels a timeout callback - can be called from the ISR */
+void timeout_cancel(struct timeout *tmo)
+{
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ if (tmo_list != NULL)
+ {
+ struct timeout *curr = tmo_list;
+ struct timeout *prev = NULL;
+
+ while (curr != tmo && curr != NULL)
+ {
+ prev = curr;
+ curr = (struct timeout *)curr->next;
+ }
+
+ if (curr != NULL)
+ {
+ /* in list */
+ if (prev == NULL)
+ tmo_list = (struct timeout *)curr->next;
+ else
+ *(const struct timeout **)&prev->next = curr->next;
+
+ if (tmo_list == NULL)
+ tick_remove_task(timeout_tick); /* last one - remove task */
+ }
+ /* not in list or tmo == NULL */
+ }
+
+ set_irq_level(oldlevel);
+}
+
+/* Adds a timeout callback - calling with an active timeout resets the
+ interval - can be called from the ISR */
+void timeout_register(struct timeout *tmo, timeout_cb_type callback,
+ int ticks, intptr_t data)
+{
+ int oldlevel;
+ struct timeout *curr;
+
+ if (tmo == NULL)
+ return;
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ /* see if this one is already registered */
+ curr = tmo_list;
+ while (curr != tmo && curr != NULL)
+ curr = (struct timeout *)curr->next;
+
+ if (curr == NULL)
+ {
+ /* not found - add it */
+ if (tmo_list == NULL)
+ tick_add_task(timeout_tick); /* first one - add task */
+
+ *(struct timeout **)&tmo->next = tmo_list;
+ tmo_list = tmo;
+ }
+
+ tmo->callback = callback;
+ tmo->ticks = ticks;
+ tmo->data = data;
+ *(long *)&tmo->expires = current_tick + ticks;
+
+ set_irq_level(oldlevel);
+}
+
+#endif /* INCLUDE_TIMEOUT_API */
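As a hedged sketch of the timeout API above: the my_tmo object and callback are
hypothetical, and per the comment on timeout_tick a callback may alter ticks
and data while running. A non-zero return re-arms the timeout for another
tmo->ticks; a zero return lets timeout_tick cancel it:

    #include "kernel.h" /* timeout_register/timeout_cancel, HZ */

    static struct timeout my_tmo; /* must stay valid while registered */

    /* Called from the timeout tick task when the timeout expires */
    static int my_timeout_cb(struct timeout *tmo)
    {
        if (--tmo->data > 0)
            return 1; /* non-zero: reload and expire again in tmo->ticks */
        return 0;     /* zero: done - cancelled without further work */
    }

    void my_start(void)
    {
        /* fire roughly every half second; data counts down five repeats */
        timeout_register(&my_tmo, my_timeout_cb, HZ/2, 5);
    }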
+
+/****************************************************************************
+ * Thread stuff
+ ****************************************************************************/
void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
@@ -96,9 +431,11 @@ void sleep(int ticks)
#elif defined(CPU_PP) && defined(BOOTLOADER)
unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
while (TIME_BEFORE(USEC_TIMER, stop))
- switch_thread(NULL);
+ switch_thread();
#else
+ set_irq_level(HIGHEST_IRQ_LEVEL);
sleep_thread(ticks);
+ switch_thread();
#endif
}
@@ -107,7 +444,7 @@ void yield(void)
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
/* Some targets don't like yielding in the bootloader */
#else
- switch_thread(NULL);
+ switch_thread();
#endif
}
@@ -116,43 +453,50 @@ void yield(void)
****************************************************************************/
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-/* Moves waiting thread's descriptor to the current sender when a
- message is dequeued */
-static void queue_fetch_sender(struct queue_sender_list *send,
- unsigned int i)
-{
- struct thread_entry **spp = &send->senders[i];
-
- if(*spp)
- {
- send->curr_sender = *spp;
- *spp = NULL;
- }
-}
+/****************************************************************************
+ * Sender thread queue structure that aids implementation of priority
+ * inheritance on queues because the send list structure is the same as
+ * for all other kernel objects:
+ *
+ * Example state:
+ * E0 added with queue_send and removed by thread via queue_wait(_w_tmo)
+ * E3 was posted with queue_post
+ * 4 events remain enqueued (E1-E4)
+ *
+ * rd wr
+ * q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
+ * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
+ * \/ \/ \/
+ * q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
+ * q->send->curr_sender: /\
+ *
+ * Thread has E0 in its own struct queue_event.
+ *
+ ****************************************************************************/
/* Puts the specified return value in the waiting thread's return value
* and wakes the thread.
- * 1) A sender should be confirmed to exist before calling which makes it
- * more efficent to reject the majority of cases that don't need this
- called.
- * 2) Requires interrupts disabled since queue overflows can cause posts
- * from interrupt handlers to wake threads. Not doing so could cause
- * an attempt at multiple wakes or other problems.
+ *
+ * A sender should be confirmed to exist before calling, which makes it
+ * more efficient to reject the majority of cases that don't require it.
*/
static void queue_release_sender(struct thread_entry **sender,
intptr_t retval)
{
- (*sender)->retval = retval;
- wakeup_thread_no_listlock(sender);
- /* This should _never_ happen - there must never be multiple
- threads in this list and it is a corrupt state */
- KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
+ struct thread_entry *thread = *sender;
+
+ *sender = NULL; /* Clear slot. */
+ thread->wakeup_ext_cb = NULL; /* Clear callback. */
+ thread->retval = retval; /* Assign thread-local return value. */
+ *thread->bqp = thread; /* Move blocking queue head to thread since
+ wakeup_thread wakes the first thread in
+ the list. */
+ wakeup_thread(thread->bqp);
}
/* Releases any waiting threads that are queued with queue_send -
* reply with 0.
- * Disable IRQs and lock before calling since it uses
- * queue_release_sender.
*/
static void queue_release_all_senders(struct event_queue *q)
{
@@ -172,25 +516,103 @@ static void queue_release_all_senders(struct event_queue *q)
}
}
+/* Callback to do extra forced removal steps from sender list in addition
+ * to the normal blocking queue removal and priority dis-inherit */
+static void queue_remove_sender_thread_cb(struct thread_entry *thread)
+{
+ *((struct thread_entry **)thread->retval) = NULL;
+ thread->wakeup_ext_cb = NULL;
+ thread->retval = 0;
+}
+
/* Enables queue_send on the specified queue - caller allocates the extra
- data structure. Only queues which are taken to be owned by a thread should
- enable this. Public waiting is not permitted. */
+ * data structure. Only queues which are taken to be owned by a thread should
+ * enable this; an official owner is not compulsory, but one must be
+ * specified for priority inheritance to operate.
+ *
+ * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
+ * messages results in an undefined order of message replies.
+ */
void queue_enable_queue_send(struct event_queue *q,
- struct queue_sender_list *send)
+ struct queue_sender_list *send,
+ struct thread_entry *owner)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
- q->send = NULL;
- if(send != NULL)
+ if(send != NULL && q->send == NULL)
{
memset(send, 0, sizeof(*send));
+#ifdef HAVE_PRIORITY_SCHEDULING
+ send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
+ send->blocker.priority = PRIORITY_IDLE;
+ send->blocker.thread = owner;
+ if(owner != NULL)
+ q->blocker_p = &send->blocker;
+#endif
q->send = send;
}
corelock_unlock(&q->cl);
set_irq_level(oldlevel);
+
+ (void)owner;
}
+
+/* Unblock a blocked thread at a given event index */
+static inline void queue_do_unblock_sender(struct queue_sender_list *send,
+ unsigned int i)
+{
+ if(send)
+ {
+ struct thread_entry **spp = &send->senders[i];
+
+ if(*spp)
+ {
+ queue_release_sender(spp, 0);
+ }
+ }
+}
+
+/* Perform the auto-reply sequence */
+static inline void queue_do_auto_reply(struct queue_sender_list *send)
+{
+ if(send && send->curr_sender)
+ {
+ /* auto-reply */
+ queue_release_sender(&send->curr_sender, 0);
+ }
+}
+
+/* Moves the waiting thread's reference from the senders array to
+ * curr_sender, which represents the thread waiting for a response to the
+ * last message removed from the queue. This also protects the thread from
+ * being bumped due to overflow which would not be a valid action since its
+ * message _is_ being processed at this point. */
+static inline void queue_do_fetch_sender(struct queue_sender_list *send,
+ unsigned int rd)
+{
+ if(send)
+ {
+ struct thread_entry **spp = &send->senders[rd];
+
+ if(*spp)
+ {
+ /* Move thread reference from array to the next thread
+ that queue_reply will release */
+ send->curr_sender = *spp;
+ (*spp)->retval = (intptr_t)spp;
+ *spp = NULL;
+ }
+ /* else message was posted asynchronously with queue_post */
+ }
+}
+#else
+/* Empty macros for when synchronous sending is not enabled */
+#define queue_release_all_senders(q)
+#define queue_do_unblock_sender(send, i)
+#define queue_do_auto_reply(send)
+#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
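To make the new owner parameter concrete, a sketch of a synchronous round trip
with the reworked API. The queue, the MY_Q_PING id and the my_* functions are
hypothetical; only the owning thread may call queue_wait on a send-enabled
queue:

    #define MY_Q_PING 1 /* hypothetical message id */

    static struct event_queue my_q;
    static struct queue_sender_list my_q_senders; /* caller-allocated */

    void my_owner_init(struct thread_entry *owner)
    {
        queue_init(&my_q, true);
        /* naming the owner enables priority inheritance on this queue */
        queue_enable_queue_send(&my_q, &my_q_senders, owner);
    }

    /* Owner thread: queue_wait auto-replies 0 to the previous sender;
     * queue_reply returns an explicit value instead */
    void my_owner_loop(void)
    {
        struct queue_event ev;
        while (1)
        {
            queue_wait(&my_q, &ev);
            if (ev.id == MY_Q_PING)
                queue_reply(&my_q, 42);
        }
    }

    /* Any other thread: blocks until the owner replies */
    intptr_t my_client_ping(void)
    {
        return queue_send(&my_q, MY_Q_PING, 0);
    }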
/* Queue must not be available for use during this call */
@@ -204,11 +626,12 @@ void queue_init(struct event_queue *q, bool register_queue)
}
corelock_init(&q->cl);
- thread_queue_init(&q->queue);
- q->read = 0;
- q->write = 0;
+ q->queue = NULL;
+ q->read = 0;
+ q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- q->send = NULL; /* No message sending by default */
+ q->send = NULL; /* No message sending by default */
+ IF_PRIO( q->blocker_p = NULL; )
#endif
if(register_queue)
@@ -254,14 +677,20 @@ void queue_delete(struct event_queue *q)
corelock_unlock(&all_queues.cl);
- /* Release threads waiting on queue head */
+ /* Release thread(s) waiting on queue head */
thread_queue_wake(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- /* Release waiting threads for reply and reply to any dequeued
- message waiting for one. */
- queue_release_all_senders(q);
- queue_reply(q, 0);
+ if(q->send)
+ {
+ /* Release threads waiting for replies */
+ queue_release_all_senders(q);
+
+ /* Reply to any dequeued message waiting for one */
+ queue_do_auto_reply(q->send);
+
+ q->send = NULL;
+ }
#endif
q->read = 0;
@@ -279,33 +708,32 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
int oldlevel;
unsigned int rd;
+#ifdef HAVE_PRIORITY_SCHEDULING
+ KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
+ QUEUE_GET_THREAD(q) == thread_get_current(),
+ "queue_wait->wrong thread\n");
+#endif
+
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send && q->send->curr_sender)
- {
- /* auto-reply */
- queue_release_sender(&q->send->curr_sender, 0);
- }
-#endif
+ /* auto-reply */
+ queue_do_auto_reply(q->send);
if (q->read == q->write)
{
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+
do
{
-#if CONFIG_CORELOCK == CORELOCK_NONE
-#elif CONFIG_CORELOCK == SW_CORELOCK
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
- cores[core].blk_ops.cl_p = &q->cl;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_SET_VARu8;
- cores[core].blk_ops.var_u8p = &q->cl.locked;
- cores[core].blk_ops.var_u8v = 0;
-#endif /* CONFIG_CORELOCK */
- block_thread(&q->queue);
+ IF_COP( current->obj_cl = &q->cl; )
+ current->bqp = &q->queue;
+
+ block_thread(current);
+
+ corelock_unlock(&q->cl);
+ switch_thread();
+
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
}
@@ -316,13 +744,8 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
rd = q->read++ & QUEUE_LENGTH_MASK;
*ev = q->events[rd];
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send && q->send->senders[rd])
- {
- /* Get data for a waiting thread if one */
- queue_fetch_sender(q->send, rd);
- }
-#endif
+ /* Get data for a waiting thread if one */
+ queue_do_fetch_sender(q->send, rd);
corelock_unlock(&q->cl);
set_irq_level(oldlevel);
@@ -332,31 +755,30 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
int oldlevel;
+#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
+ KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
+ QUEUE_GET_THREAD(q) == thread_get_current(),
+ "queue_wait_w_tmo->wrong thread\n");
+#endif
+
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if (q->send && q->send->curr_sender)
- {
- /* auto-reply */
- queue_release_sender(&q->send->curr_sender, 0);
- }
-#endif
+ /* Auto-reply */
+ queue_do_auto_reply(q->send);
if (q->read == q->write && ticks > 0)
{
-#if CONFIG_CORELOCK == CORELOCK_NONE
-#elif CONFIG_CORELOCK == SW_CORELOCK
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
- cores[core].blk_ops.cl_p = &q->cl;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_SET_VARu8;
- cores[core].blk_ops.var_u8p = &q->cl.locked;
- cores[core].blk_ops.var_u8v = 0;
-#endif
- block_thread_w_tmo(&q->queue, ticks);
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+
+ IF_COP( current->obj_cl = &q->cl; )
+ current->bqp = &q->queue;
+
+ block_thread_w_tmo(current, ticks);
+ corelock_unlock(&q->cl);
+
+ switch_thread();
+
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
}
@@ -367,14 +789,8 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
*ev = q->events[rd];
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send && q->send->senders[rd])
- {
- /* Get data for a waiting thread if one */
- queue_fetch_sender(q->send, rd);
- }
-#endif
+ /* Get data for a waiting thread if one */
+ queue_do_fetch_sender(q->send, rd);
}
else
{
@@ -398,18 +814,8 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
q->events[wr].id = id;
q->events[wr].data = data;
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send)
- {
- struct thread_entry **spp = &q->send->senders[wr];
-
- if (*spp)
- {
- /* overflow protect - unblock any thread waiting at this index */
- queue_release_sender(spp, 0);
- }
- }
-#endif
+ /* overflow protect - unblock any thread waiting at this index */
+ queue_do_unblock_sender(q->send, wr);
/* Wakeup a waiting thread if any */
wakeup_thread(&q->queue);
@@ -436,8 +842,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
if(q->send)
{
- const unsigned int core = CURRENT_CORE;
- struct thread_entry **spp = &q->send->senders[wr];
+ struct queue_sender_list *send = q->send;
+ struct thread_entry **spp = &send->senders[wr];
+ struct thread_entry *current = cores[CURRENT_CORE].running;
if(*spp)
{
@@ -448,17 +855,20 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
/* Wakeup a waiting thread if any */
wakeup_thread(&q->queue);
-#if CONFIG_CORELOCK == CORELOCK_NONE
-#elif CONFIG_CORELOCK == SW_CORELOCK
- cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
- cores[core].blk_ops.cl_p = &q->cl;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- cores[core].blk_ops.flags = TBOP_SET_VARu8;
- cores[core].blk_ops.var_u8p = &q->cl.locked;
- cores[core].blk_ops.var_u8v = 0;
-#endif
- block_thread_no_listlock(spp);
- return cores[core].running->retval;
+ /* Save thread in slot, add to list and wait for reply */
+ *spp = current;
+ IF_COP( current->obj_cl = &q->cl; )
+ IF_PRIO( current->blocker = q->blocker_p; )
+ current->wakeup_ext_cb = queue_remove_sender_thread_cb;
+ current->retval = (intptr_t)spp;
+ current->bqp = &send->list;
+
+ block_thread(current);
+
+ corelock_unlock(&q->cl);
+ switch_thread();
+
+ return current->retval;
}
/* Function as queue_post if sending is not enabled */
@@ -497,37 +907,22 @@ void queue_reply(struct event_queue *q, intptr_t retval)
{
if(q->send && q->send->curr_sender)
{
-#if NUM_CORES > 1
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
/* Double-check locking */
- if(q->send && q->send->curr_sender)
+ IF_COP( if(q->send && q->send->curr_sender) )
{
-#endif
-
queue_release_sender(&q->send->curr_sender, retval);
-
-#if NUM_CORES > 1
}
+
corelock_unlock(&q->cl);
set_irq_level(oldlevel);
-#endif
}
}
-#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
-
-/* Poll queue to see if a message exists - careful in using the result if
- * queue_remove_from_head is called when messages are posted - possibly use
- * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
- * unsignals the queue may cause an unwanted block */
-bool queue_empty(const struct event_queue* q)
-{
- return ( q->read == q->write );
-}
bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
- if (q->read == q->write)
+ if(q->read == q->write)
return false;
bool have_msg = false;
@@ -535,7 +930,7 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
- if (q->read != q->write)
+ if(q->read != q->write)
{
*ev = q->events[q->read & QUEUE_LENGTH_MASK];
have_msg = true;
@@ -546,6 +941,16 @@ bool queue_peek(struct event_queue *q, struct queue_event *ev)
return have_msg;
}
+#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
+
+/* Poll queue to see if a message exists - be careful using the result if
+ * queue_remove_from_head is called when messages are posted - possibly use
+ * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
+ * unsignals the queue may cause an unwanted block */
+bool queue_empty(const struct event_queue* q)
+{
+ return ( q->read == q->write );
+}
void queue_clear(struct event_queue* q)
{
@@ -554,11 +959,9 @@ void queue_clear(struct event_queue* q)
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
corelock_lock(&q->cl);
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Release all threads waiting in the queue for a reply -
dequeued sent message will be handled by owning thread */
queue_release_all_senders(q);
-#endif
q->read = 0;
q->write = 0;
@@ -583,18 +986,9 @@ void queue_remove_from_head(struct event_queue *q, long id)
break;
}
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send)
- {
- struct thread_entry **spp = &q->send->senders[rd];
+ /* Release any thread waiting on this message */
+ queue_do_unblock_sender(q->send, rd);
- if (*spp)
- {
- /* Release any thread waiting on this message */
- queue_release_sender(spp, 0);
- }
- }
-#endif
q->read++;
}
@@ -636,397 +1030,72 @@ int queue_broadcast(long id, intptr_t data)
}
/****************************************************************************
- * Timer tick
- ****************************************************************************/
-#if CONFIG_CPU == SH7034
-void tick_start(unsigned int interval_in_ms)
-{
- unsigned long count;
-
- count = CPU_FREQ * interval_in_ms / 1000 / 8;
-
- if(count > 0x10000)
- {
- panicf("Error! The tick interval is too long (%d ms)\n",
- interval_in_ms);
- return;
- }
-
- /* We are using timer 0 */
-
- TSTR &= ~0x01; /* Stop the timer */
- TSNC &= ~0x01; /* No synchronization */
- TMDR &= ~0x01; /* Operate normally */
-
- TCNT0 = 0; /* Start counting at 0 */
- GRA0 = (unsigned short)(count - 1);
- TCR0 = 0x23; /* Clear at GRA match, sysclock/8 */
-
- /* Enable interrupt on level 1 */
- IPRC = (IPRC & ~0x00f0) | 0x0010;
-
- TSR0 &= ~0x01;
- TIER0 = 0xf9; /* Enable GRA match interrupt */
-
- TSTR |= 0x01; /* Start timer 1 */
-}
-
-void IMIA0(void) __attribute__ ((interrupt_handler));
-void IMIA0(void)
-{
- int i;
-
- /* Run through the list of tick tasks */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i])
- {
- tick_funcs[i]();
- }
- }
-
- current_tick++;
-
- TSR0 &= ~0x01;
-}
-#elif defined(CPU_COLDFIRE)
-void tick_start(unsigned int interval_in_ms)
-{
- unsigned long count;
- int prescale;
-
- count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;
-
- if(count > 0x10000)
- {
- panicf("Error! The tick interval is too long (%d ms)\n",
- interval_in_ms);
- return;
- }
-
- prescale = cpu_frequency / CPU_FREQ;
- /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
- changes within timer.c */
-
- /* We are using timer 0 */
-
- TRR0 = (unsigned short)(count - 1); /* The reference count */
- TCN0 = 0; /* reset the timer */
- TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
- /* restart, CLK/16, enabled, prescaler */
-
- TER0 = 0xff; /* Clear all events */
-
- ICR1 = 0x8c; /* Interrupt on level 3.0 */
- IMR &= ~0x200;
-}
-
-void TIMER0(void) __attribute__ ((interrupt_handler));
-void TIMER0(void)
-{
- int i;
-
- /* Run through the list of tick tasks */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i])
- {
- tick_funcs[i]();
- }
- }
-
- current_tick++;
-
- TER0 = 0xff; /* Clear all events */
-}
-
-#elif defined(CPU_PP)
-
-#ifndef BOOTLOADER
-void TIMER1(void)
-{
- int i;
-
- /* Run through the list of tick tasks (using main core) */
- TIMER1_VAL; /* Read value to ack IRQ */
-
- /* Run through the list of tick tasks using main CPU core -
- wake up the COP through its control interface to provide pulse */
- for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if (tick_funcs[i])
- {
- tick_funcs[i]();
- }
- }
-
-#if NUM_CORES > 1
- /* Pulse the COP */
- core_wake(COP);
-#endif /* NUM_CORES */
-
- current_tick++;
-}
-#endif
-
-/* Must be last function called init kernel/thread initialization */
-void tick_start(unsigned int interval_in_ms)
-{
-#ifndef BOOTLOADER
- TIMER1_CFG = 0x0;
- TIMER1_VAL;
- /* enable timer */
- TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
- /* unmask interrupt source */
- CPU_INT_EN = TIMER1_MASK;
-#else
- /* We don't enable interrupts in the bootloader */
- (void)interval_in_ms;
-#endif
-}
-
-#elif CONFIG_CPU == PNX0101
-
-void timer_handler(void)
-{
- int i;
-
- /* Run through the list of tick tasks */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i])
- tick_funcs[i]();
- }
-
- current_tick++;
-
- TIMER0.clr = 0;
-}
-
-void tick_start(unsigned int interval_in_ms)
-{
- TIMER0.ctrl &= ~0x80; /* Disable the counter */
- TIMER0.ctrl |= 0x40; /* Reload after counting down to zero */
- TIMER0.load = 3000000 * interval_in_ms / 1000;
- TIMER0.ctrl &= ~0xc; /* No prescaler */
- TIMER0.clr = 1; /* Clear the interrupt request */
-
- irq_set_int_handler(IRQ_TIMER0, timer_handler);
- irq_enable_int(IRQ_TIMER0);
-
- TIMER0.ctrl |= 0x80; /* Enable the counter */
-}
-#endif
-
-int tick_add_task(void (*f)(void))
-{
- int i;
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- /* Add a task if there is room */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i] == NULL)
- {
- tick_funcs[i] = f;
- set_irq_level(oldlevel);
- return 0;
- }
- }
- set_irq_level(oldlevel);
- panicf("Error! tick_add_task(): out of tasks");
- return -1;
-}
-
-int tick_remove_task(void (*f)(void))
-{
- int i;
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- /* Remove a task if it is there */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i] == f)
- {
- tick_funcs[i] = NULL;
- set_irq_level(oldlevel);
- return 0;
- }
- }
-
- set_irq_level(oldlevel);
- return -1;
-}
-
-/****************************************************************************
- * Tick-based interval timers/one-shots - be mindful this is not really
- * intended for continuous timers but for events that need to run for a short
- * time and be cancelled without further software intervention.
- ****************************************************************************/
-#ifdef INCLUDE_TIMEOUT_API
-static struct timeout *tmo_list = NULL; /* list of active timeout events */
-
-/* timeout tick task - calls event handlers when they expire
- * Event handlers may alter ticks, callback and data during operation.
- */
-static void timeout_tick(void)
-{
- unsigned long tick = current_tick;
- struct timeout *curr, *next;
-
- for (curr = tmo_list; curr != NULL; curr = next)
- {
- next = (struct timeout *)curr->next;
-
- if (TIME_BEFORE(tick, curr->expires))
- continue;
-
- /* this event has expired - call callback */
- if (curr->callback(curr))
- *(long *)&curr->expires = tick + curr->ticks; /* reload */
- else
- timeout_cancel(curr); /* cancel */
- }
-}
-
-/* Cancels a timeout callback - can be called from the ISR */
-void timeout_cancel(struct timeout *tmo)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- if (tmo_list != NULL)
- {
- struct timeout *curr = tmo_list;
- struct timeout *prev = NULL;
-
- while (curr != tmo && curr != NULL)
- {
- prev = curr;
- curr = (struct timeout *)curr->next;
- }
-
- if (curr != NULL)
- {
- /* in list */
- if (prev == NULL)
- tmo_list = (struct timeout *)curr->next;
- else
- *(const struct timeout **)&prev->next = curr->next;
-
- if (tmo_list == NULL)
- tick_remove_task(timeout_tick); /* last one - remove task */
- }
- /* not in list or tmo == NULL */
- }
-
- set_irq_level(oldlevel);
-}
-
-/* Adds a timeout callback - calling with an active timeout resets the
- interval - can be called from the ISR */
-void timeout_register(struct timeout *tmo, timeout_cb_type callback,
- int ticks, intptr_t data)
-{
- int oldlevel;
- struct timeout *curr;
-
- if (tmo == NULL)
- return;
-
- oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- /* see if this one is already registered */
- curr = tmo_list;
- while (curr != tmo && curr != NULL)
- curr = (struct timeout *)curr->next;
-
- if (curr == NULL)
- {
- /* not found - add it */
- if (tmo_list == NULL)
- tick_add_task(timeout_tick); /* first one - add task */
-
- *(struct timeout **)&tmo->next = tmo_list;
- tmo_list = tmo;
- }
-
- tmo->callback = callback;
- tmo->ticks = ticks;
- tmo->data = data;
- *(long *)&tmo->expires = current_tick + ticks;
-
- set_irq_level(oldlevel);
-}
-
-#endif /* INCLUDE_TIMEOUT_API */
-
-/****************************************************************************
* Simple mutex functions ;)
****************************************************************************/
+
+/* Initialize a mutex object - call before any use and do not call again once
+ * the object is available to other threads */
void mutex_init(struct mutex *m)
{
+ corelock_init(&m->cl);
m->queue = NULL;
- m->thread = NULL;
m->count = 0;
m->locked = 0;
-#if CONFIG_CORELOCK == SW_CORELOCK
- corelock_init(&m->cl);
+ MUTEX_SET_THREAD(m, NULL);
+#ifdef HAVE_PRIORITY_SCHEDULING
+ m->blocker.priority = PRIORITY_IDLE;
+ m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
+ m->no_preempt = false;
#endif
}
+/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
const unsigned int core = CURRENT_CORE;
- struct thread_entry *const thread = cores[core].running;
+ struct thread_entry *current = cores[core].running;
- if(thread == m->thread)
+ if(current == MUTEX_GET_THREAD(m))
{
+ /* current thread already owns this mutex */
m->count++;
return;
}
- /* Repeat some stuff here or else all the variation is too difficult to
- read */
-#if CONFIG_CORELOCK == CORELOCK_SWAP
- /* peek at lock until it's no longer busy */
- unsigned int locked;
- while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
- if(locked == 0)
- {
- m->thread = thread;
- m->locked = 1;
- return;
- }
-
- /* Block until the lock is open... */
- cores[core].blk_ops.flags = TBOP_SET_VARu8;
- cores[core].blk_ops.var_u8p = &m->locked;
- cores[core].blk_ops.var_u8v = 1;
-#else
+ /* lock out other cores */
corelock_lock(&m->cl);
- if (m->locked == 0)
+
+ if(m->locked == 0)
{
+ /* lock is open */
+ MUTEX_SET_THREAD(m, current);
m->locked = 1;
- m->thread = thread;
corelock_unlock(&m->cl);
return;
}
- /* Block until the lock is open... */
-#if CONFIG_CORELOCK == SW_CORELOCK
- cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
- cores[core].blk_ops.cl_p = &m->cl;
-#endif
-#endif /* CONFIG_CORELOCK */
+ /* block until the lock is open... */
+ IF_COP( current->obj_cl = &m->cl; )
+ IF_PRIO( current->blocker = &m->blocker; )
+ current->bqp = &m->queue;
+
+ set_irq_level(HIGHEST_IRQ_LEVEL);
+ block_thread(current);
- block_thread_no_listlock(&m->queue);
+ corelock_unlock(&m->cl);
+
+ /* ...and turn control over to next thread */
+ switch_thread();
}
+/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
/* unlocker not being the owner is an unlocking violation */
- KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running,
- "mutex_unlock->wrong thread (recurse)");
+ KERNEL_ASSERT(MUTEX_GET_THREAD(m) == thread_get_current(),
+ "mutex_unlock->wrong thread (%s != %s)\n",
+ MUTEX_GET_THREAD(m)->name,
+ thread_get_current()->name);
if(m->count > 0)
{
@@ -1035,37 +1104,33 @@ void mutex_unlock(struct mutex *m)
return;
}
-#if CONFIG_CORELOCK == SW_CORELOCK
/* lock out other cores */
corelock_lock(&m->cl);
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- /* wait for peeker to move on */
- while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
-#endif
/* transfer to next queued thread if any */
-
- /* This can become busy using SWP but is safe since only one thread
- will be changing things at a time. Allowing timeout waits will
- change that however but not now. There is also a hazard the thread
- could be killed before performing the wakeup but that's just
- irresponsible. :-) */
- m->thread = m->queue;
-
- if(m->thread == NULL)
+ if(m->queue == NULL)
{
- m->locked = 0; /* release lock */
-#if CONFIG_CORELOCK == SW_CORELOCK
+ /* no threads waiting - open the lock */
+ MUTEX_SET_THREAD(m, NULL);
+ m->locked = 0;
corelock_unlock(&m->cl);
-#endif
+ return;
}
- else /* another thread is waiting - remain locked */
+ else
{
- wakeup_thread_no_listlock(&m->queue);
-#if CONFIG_CORELOCK == SW_CORELOCK
+ const int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ /* Transfer of the owning thread is handled in the wakeup protocol
+ * if priorities are enabled; otherwise just set it from the
+ * queue head. */
+ IFN_PRIO( MUTEX_SET_THREAD(m, m->queue); )
+ IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
+ set_irq_level(oldlevel);
+
corelock_unlock(&m->cl);
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- m->locked = 1;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if((result & THREAD_SWITCH) && !m->no_preempt)
+ switch_thread();
#endif
}
}
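A brief usage sketch of the mutex API as reworked above (names hypothetical).
Locking again from the current owner just bumps m->count, and unlocking from
any other thread trips the KERNEL_ASSERT:

    static struct mutex my_lock; /* hypothetical shared-state guard */
    static int my_shared_value;

    void my_init(void)
    {
        mutex_init(&my_lock); /* once, before other threads can use it */
    }

    void my_update(int v)
    {
        mutex_lock(&my_lock);   /* blocks until free; recursion is counted */
        my_shared_value = v;
        mutex_unlock(&my_lock); /* only the owning thread may call this */
    }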
@@ -1083,28 +1148,32 @@ void spinlock_init(struct spinlock *l)
void spinlock_lock(struct spinlock *l)
{
- struct thread_entry *const thread = cores[CURRENT_CORE].running;
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *current = cores[core].running;
- if (l->thread == thread)
+ if(l->thread == current)
{
+ /* current core already owns it */
l->count++;
return;
}
+ /* lock against other processor cores */
corelock_lock(&l->cl);
- l->thread = thread;
+ /* take ownership */
+ l->thread = current;
}
void spinlock_unlock(struct spinlock *l)
{
/* unlocker not being the owner is an unlocking violation */
- KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
- "spinlock_unlock->wrong thread");
+ KERNEL_ASSERT(l->thread == thread_get_current(),
+ "spinlock_unlock->wrong thread\n");
- if (l->count > 0)
+ if(l->count > 0)
{
- /* this thread still owns lock */
+ /* this core still owns lock */
l->count--;
return;
}
@@ -1124,76 +1193,62 @@ void spinlock_unlock(struct spinlock *l)
void semaphore_init(struct semaphore *s, int max, int start)
{
KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
- "semaphore_init->inv arg");
+ "semaphore_init->inv arg\n");
s->queue = NULL;
s->max = max;
s->count = start;
-#if CONFIG_CORELOCK == SW_CORELOCK
corelock_init(&s->cl);
-#endif
}
void semaphore_wait(struct semaphore *s)
{
-#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ struct thread_entry *current;
+
corelock_lock(&s->cl);
+
if(--s->count >= 0)
{
+ /* wait satisfied */
corelock_unlock(&s->cl);
return;
}
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- int count;
- while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
- if(--count >= 0)
- {
- s->count = count;
- return;
- }
-#endif
- /* too many waits - block until dequeued */
-#if CONFIG_CORELOCK == SW_CORELOCK
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
- cores[core].blk_ops.cl_p = &s->cl;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_SET_VARi;
- cores[core].blk_ops.var_ip = &s->count;
- cores[core].blk_ops.var_iv = count;
-#endif
- block_thread_no_listlock(&s->queue);
+ /* too many waits - block until dequeued... */
+ current = cores[CURRENT_CORE].running;
+
+ IF_COP( current->obj_cl = &s->cl; )
+ current->bqp = &s->queue;
+
+ set_irq_level(HIGHEST_IRQ_LEVEL);
+ block_thread(current);
+
+ corelock_unlock(&s->cl);
+
+ /* ...and turn control over to next thread */
+ switch_thread();
}
void semaphore_release(struct semaphore *s)
{
-#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ IF_PRIO( unsigned int result = THREAD_NONE; )
+
corelock_lock(&s->cl);
- if (s->count < s->max)
- {
- if (++s->count <= 0)
- {
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- int count;
- while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
- if(count < s->max)
- {
- if(++count <= 0)
- {
-#endif /* CONFIG_CORELOCK */
- /* there should be threads in this queue */
- KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup");
- /* a thread was queued - wake it up */
- wakeup_thread_no_listlock(&s->queue);
- }
+ if(s->count < s->max && ++s->count <= 0)
+ {
+ /* there should be threads in this queue */
+ KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup\n");
+ /* a thread was queued - wake it up */
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ IF_PRIO( result = ) wakeup_thread(&s->queue);
+ set_irq_level(oldlevel);
}
-#if CONFIG_CORELOCK == SW_CORELOCK
corelock_unlock(&s->cl);
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- s->count = count;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if(result & THREAD_SWITCH)
+ switch_thread();
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
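As a usage sketch (hypothetical names), a producer thread signalling a
consumer through a single-slot semaphore:

    static struct semaphore my_done; /* hypothetical completion flag */

    void my_setup(void)
    {
        semaphore_init(&my_done, 1, 0); /* max 1 outstanding, start empty */
    }

    void my_producer(void)
    {
        /* ... produce one unit of work ... */
        semaphore_release(&my_done); /* wakes a waiter; may preempt */
    }

    void my_consumer(void)
    {
        semaphore_wait(&my_done); /* blocks until released */
        /* ... consume ... */
    }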
@@ -1208,117 +1263,107 @@ void event_init(struct event *e, unsigned int flags)
e->queues[STATE_SIGNALED] = NULL;
e->state = flags & STATE_SIGNALED;
e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
-#if CONFIG_CORELOCK == SW_CORELOCK
corelock_init(&e->cl);
-#endif
}
void event_wait(struct event *e, unsigned int for_state)
{
- unsigned int last_state;
-#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ struct thread_entry *current;
+
corelock_lock(&e->cl);
- last_state = e->state;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
-#endif
if(e->automatic != 0)
{
/* wait for false always satisfied by definition
or if it just changed to false */
- if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
+ if(e->state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
{
/* automatic - unsignal */
e->state = STATE_NONSIGNALED;
-#if CONFIG_CORELOCK == SW_CORELOCK
corelock_unlock(&e->cl);
-#endif
return;
}
/* block until state matches */
}
- else if(for_state == last_state)
+ else if(for_state == e->state)
{
/* the state being waited for is the current state */
-#if CONFIG_CORELOCK == SW_CORELOCK
corelock_unlock(&e->cl);
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- e->state = last_state;
-#endif
return;
}
- {
- /* current state does not match wait-for state */
-#if CONFIG_CORELOCK == SW_CORELOCK
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
- cores[core].blk_ops.cl_p = &e->cl;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- const unsigned int core = CURRENT_CORE;
- cores[core].blk_ops.flags = TBOP_SET_VARu8;
- cores[core].blk_ops.var_u8p = &e->state;
- cores[core].blk_ops.var_u8v = last_state;
-#endif
- block_thread_no_listlock(&e->queues[for_state]);
- }
+ /* block until state matches what the caller requests */
+ current = cores[CURRENT_CORE].running;
+
+ IF_COP( current->obj_cl = &e->cl; )
+ current->bqp = &e->queues[for_state];
+
+ set_irq_level(HIGHEST_IRQ_LEVEL);
+ block_thread(current);
+
+ corelock_unlock(&e->cl);
+
+ /* turn control over to next thread */
+ switch_thread();
}
void event_set_state(struct event *e, unsigned int state)
{
- unsigned int last_state;
-#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
+ unsigned int result;
+ int oldlevel;
+
corelock_lock(&e->cl);
- last_state = e->state;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
-#endif
- if(last_state == state)
+ if(e->state == state)
{
/* no change */
-#if CONFIG_CORELOCK == SW_CORELOCK
corelock_unlock(&e->cl);
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- e->state = last_state;
-#endif
return;
}
+ IF_PRIO( result = THREAD_OK; )
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
if(state == STATE_SIGNALED)
{
if(e->automatic != 0)
{
- struct thread_entry *thread;
- /* no thread should have ever blocked for unsignaled */
+ /* no thread should have ever blocked for nonsignaled */
KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
- "set_event_state->queue[NS]:S");
+ "set_event_state->queue[NS]:S\n");
/* pass to next thread and keep unsignaled - "pulse" */
- thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
- e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
+ result = wakeup_thread(&e->queues[STATE_SIGNALED]);
+ e->state = (result & THREAD_OK) ? STATE_NONSIGNALED : STATE_SIGNALED;
}
else
{
/* release all threads waiting for signaled */
- thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
e->state = STATE_SIGNALED;
+ IF_PRIO( result = )
+ thread_queue_wake(&e->queues[STATE_SIGNALED]);
}
}
else
{
- /* release all threads waiting for unsignaled */
+ /* release all threads waiting for nonsignaled */
/* no thread should have ever blocked if automatic */
KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
- e->automatic == 0, "set_event_state->queue[NS]:NS");
+ e->automatic == 0, "set_event_state->queue[NS]:NS\n");
- thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
e->state = STATE_NONSIGNALED;
+ IF_PRIO( result = )
+ thread_queue_wake(&e->queues[STATE_NONSIGNALED]);
}
-#if CONFIG_CORELOCK == SW_CORELOCK
+ set_irq_level(oldlevel);
+
corelock_unlock(&e->cl);
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if(result & THREAD_SWITCH)
+ switch_thread();
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
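A sketch of the event semantics (names hypothetical): an EVENT_AUTOMATIC
event "pulses" one waiter per signal and reverts to nonsignaled, while a
manual event releases every thread waiting for the new state:

    static struct event my_evt;

    void my_evt_setup(void)
    {
        /* start nonsignaled; automatic wakes at most one waiter per set */
        event_init(&my_evt, EVENT_AUTOMATIC);
    }

    void my_worker(void)
    {
        while (1)
        {
            event_wait(&my_evt, STATE_SIGNALED); /* blocks, auto-unsignals */
            /* ... handle one unit of work ... */
        }
    }

    void my_kick(void)
    {
        event_set_state(&my_evt, STATE_SIGNALED); /* passes to one waiter */
    }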
diff --git a/firmware/pcm_record.c b/firmware/pcm_record.c
index 1437b43..49da257 100644
--- a/firmware/pcm_record.c
+++ b/firmware/pcm_record.c
@@ -361,11 +361,12 @@ unsigned long pcm_rec_sample_rate(void)
void pcm_rec_init(void)
{
queue_init(&pcmrec_queue, true);
- queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send);
pcmrec_thread_p =
create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack),
0, pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING)
IF_COP(, CPU));
+ queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send,
+ pcmrec_thread_p);
} /* pcm_rec_init */
/** audio_* group **/
@@ -874,9 +875,9 @@ static void pcmrec_flush(unsigned flush_num)
logf("pcmrec: boost (%s)",
num >= flood_watermark ? "num" : "time");
prio_pcmrec = thread_set_priority(NULL,
- thread_get_priority(NULL) - 1);
+ thread_get_priority(NULL) - 4);
prio_codec = thread_set_priority(codec_thread_p,
- thread_get_priority(codec_thread_p) - 1);
+ thread_get_priority(codec_thread_p) - 4);
}
#endif
diff --git a/firmware/target/arm/ffs-arm.S b/firmware/target/arm/ffs-arm.S
new file mode 100644
index 0000000..bb888ab
--- /dev/null
+++ b/firmware/target/arm/ffs-arm.S
@@ -0,0 +1,74 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2008 by Michael Sevakis
+ *
+ * All files in this archive are subject to the GNU General Public License.
+ * See the file COPYING in the source tree root for full license agreement.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+ #include "config.h"
+
+/****************************************************************************
+ * int find_first_set_bit(uint32_t val);
+ *
+ * Find the index of the least significant set bit in the 32-bit word.
+ *
+ * return values:
+ * 0 - bit 0 is set
+ * 1 - bit 1 is set
+ * ...
+ * 31 - bit 31 is set
+ * 32 - no bits set
+ ****************************************************************************/
+ .align 2
+ .global find_first_set_bit
+ .type find_first_set_bit,%function
+find_first_set_bit:
+ @ Standard trick to isolate bottom bit in r0 or 0 if r0 = 0 on entry
+ rsb r2, r0, #0 @ r1 = r0 & -r0
+ ands r1, r0, r2 @
+
+ @ now r1 has at most one set bit, call this X
+
+#if ARM_ARCH >= 5
+ clz r0, r1 @ Get lead 0's count
+ rsbne r0, r0, #31 @ lead 0's -> bit index
+ bx lr @
+#else
+ @ this is the ffs algorithm devised by D.Seal and posted to
+ @ comp.sys.arm on 16 Feb 1994.
+ @
+ @ Output modified to suit Rockbox purposes.
+
+ adr r2, L_ffs_table
+ orrne r1, r1, r1, lsl #4 @ r1 = X * 0x11
+ orrne r1, r1, r1, lsl #6 @ r1 = X * 0x451
+ rsbne r1, r1, r1, lsl #16 @ r1 = X * 0x0450fbaf
+
+ @ now lookup in table indexed on top 6 bits of r1
+ ldrb r0, [ r2, r1, lsr #26 ] @
+ bx lr @
+
+L_ffs_table:
+ @ 0 1 2 3 4 5 6 7
+ @----------------------------------------------
+ .byte 32, 0, 1, 12, 2, 6, 0, 13 @ 0- 7
+ .byte 3, 0, 7, 0, 0, 0, 0, 14 @ 8-15
+ .byte 10, 4, 0, 0, 8, 0, 0, 25 @ 16-23
+ .byte 0, 0, 0, 0, 0, 21, 27, 15 @ 24-31
+ .byte 31, 11, 5, 0, 0, 0, 0, 0 @ 32-39
+ .byte 9, 0, 0, 24, 0, 0, 20, 26 @ 40-47
+ .byte 30, 0, 0, 0, 0, 23, 0, 19 @ 48-55
+ .byte 29, 0, 22, 18, 28, 17, 16, 0 @ 56-63
+#endif
+ .size find_first_set_bit, .-find_first_set_bit
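For reference, a plain-C sketch with the same contract as the routine above
(0-31 for the lowest set bit, 32 when no bit is set); the function name is
hypothetical. The asm uses clz or the multiply-and-lookup trick because a
shift loop like this is much slower on the targets in question:

    #include <stdint.h>

    int find_first_set_bit_c(uint32_t val)
    {
        int bit = 0;
        if (val == 0)
            return 32;          /* no bits set */
        while ((val & 1) == 0)  /* shift until the low bit is the answer */
        {
            val >>= 1;
            bit++;
        }
        return bit;
    }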
diff --git a/firmware/target/arm/i2c-pp.c b/firmware/target/arm/i2c-pp.c
index 092a59b..1cfbfae 100644
--- a/firmware/target/arm/i2c-pp.c
+++ b/firmware/target/arm/i2c-pp.c
@@ -45,7 +45,7 @@ static int pp_i2c_wait_not_busy(void)
if (!(I2C_STATUS & I2C_BUSY)) {
return 0;
}
- priority_yield();
+ yield();
}
return -1;
diff --git a/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c b/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c
index 1f5c5c8..f5d37ba 100644
--- a/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c
+++ b/firmware/target/arm/s3c2440/gigabeat-fx/ata-meg-fx.c
@@ -128,7 +128,7 @@ void copy_read_sectors(unsigned char* buf, int wordcount)
/* Wait for transfer to complete */
while((DSTAT0 & 0x000fffff))
- priority_yield();
+ yield();
/* Dump cache for the buffer */
}
#endif
diff --git a/firmware/target/arm/sandisk/ata-c200_e200.c b/firmware/target/arm/sandisk/ata-c200_e200.c
index c1c0cb3..747cb17 100644
--- a/firmware/target/arm/sandisk/ata-c200_e200.c
+++ b/firmware/target/arm/sandisk/ata-c200_e200.c
@@ -198,7 +198,7 @@ static bool sd_poll_status(unsigned int trigger, long timeout)
if (TIME_AFTER(time, next_yield))
{
long ty = USEC_TIMER;
- priority_yield();
+ yield();
timeout += USEC_TIMER - ty;
next_yield = ty + MIN_YIELD_PERIOD;
}
@@ -317,7 +317,7 @@ static int sd_wait_for_state(unsigned int state, int id)
us = USEC_TIMER;
if (TIME_AFTER(us, next_yield))
{
- priority_yield();
+ yield();
timeout += USEC_TIMER - us;
next_yield = us + MIN_YIELD_PERIOD;
}
diff --git a/firmware/target/coldfire/ffs-coldfire.S b/firmware/target/coldfire/ffs-coldfire.S
new file mode 100644
index 0000000..4f21013
--- /dev/null
+++ b/firmware/target/coldfire/ffs-coldfire.S
@@ -0,0 +1,62 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2008 by Michael Sevakis
+ *
+ * All files in this archive are subject to the GNU General Public License.
+ * See the file COPYING in the source tree root for full license agreement.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+ #include "config.h"
+
+/****************************************************************************
+ * int find_first_set_bit(uint32_t val);
+ *
+ * Find the index of the least significant set bit in the 32-bit word.
+ *
+ * return values:
+ * 0 - bit 0 is set
+ * 1 - bit 1 is set
+ * ...
+ * 31 - bit 31 is set
+ * 32 - no bits set
+ ****************************************************************************/
+ .text
+ .align 2
+ .global find_first_set_bit
+ .type find_first_set_bit,@function
+find_first_set_bit:
+ | this is a coldfire version of the ffs algorithm devised by D.Seal
+ | and posted to comp.sys.arm on 16 Feb 1994.
+ |
+ | Output modified to suit rockbox purposes.
+
+ | Standard trick to isolate bottom bit in r0 or 0 if r0 = 0 on entry
+ move.l 4(%sp), %d1 | %d1 = %d1 & -%d1
+ lea.l L_ffs_table, %a0 | %a0 = table address
+ move.l %d1, %d0 |
+ neg.l %d1 |
+ and.l %d0, %d1 |
+
+ | now %d1 has at most one set bit, call this X
+
+ move.l #0x0450fbaf, %d0 | %d0 = multiplier
+ mulu.l %d0, %d1 | %d1 = X * 0x0450fbaf
+
+ | now lookup in table indexed on top 6 bits of %d0
+ moveq.l #26, %d0 | %d0 = final shift count
+ lsr.l %d0, %d1 |
+ clr.l %d0 |
+ move.b (%a0, %d1.l), %d0 |
+ rts |
+
+ .size find_first_set_bit, .-find_first_set_bit
diff --git a/firmware/thread.c b/firmware/thread.c
index 8bebfed..259a66a 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -28,6 +28,10 @@
#ifdef RB_PROFILE
#include <profile.h>
#endif
+/****************************************************************************
+ * ATTENTION!! *
+ * See notes below on implementing processor-specific portions! *
+ ***************************************************************************/
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
@@ -59,9 +63,7 @@
* event queues. The kernel object must have a scheme to protect itself from
* access by another processor and is responsible for serializing the calls
* to block_thread(_w_tmo) and wakeup_thread both to themselves and to each
- * other. If a thread blocks on an object it must fill-in the blk_ops members
- * for its core to unlock _after_ the thread's context has been saved and the
- * unlocking will be done in reverse from this heirarchy.
+ * other. Objects' queues are also protected here.
*
* 3) Thread Slot
* This locks access to the thread's slot such that its state cannot be
@@ -70,70 +72,66 @@
* a thread while it is still blocking will likely desync its state with
* the other resources used for that state.
*
- * 4) Lists
- * Usually referring to a list (aka. queue) that a thread will be blocking
- * on that belongs to some object and is shareable amongst multiple
- * processors. Parts of the scheduler may have access to them without actually
- * locking the kernel object such as when a thread is blocked with a timeout
- * (such as calling queue_wait_w_tmo). Of course the kernel object also gets
- * it lists locked when the thread blocks so that all object list access is
- * synchronized. Failure to do so would corrupt the list links.
- *
- * 5) Core Lists
+ * 4) Core Lists
* These lists are specific to a particular processor core and are accessible
- * by all processor cores and interrupt handlers. They are used when an
- * operation may only be performed by the thread's own core in a normal
- * execution context. The wakeup list is the prime example where a thread
- * may be added by any means and the thread's own core will remove it from
- * the wakeup list and put it on the running list (which is only ever
- * accessible by its own processor).
- */
-#define DEADBEEF ((unsigned int)0xdeadbeef)
-/* Cast to the the machine int type, whose size could be < 4. */
-struct core_entry cores[NUM_CORES] IBSS_ATTR;
-struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
-
-static const char main_thread_name[] = "main";
-extern int stackbegin[];
-extern int stackend[];
+ * by all processor cores and interrupt handlers. The running (rtr) list is
+ * the prime example where a thread may be added by any means.
+ */
-/* core_sleep procedure to implement for any CPU to ensure an asychronous wakup
- * never results in requiring a wait until the next tick (up to 10000uS!). May
- * require assembly and careful instruction ordering.
+/*---------------------------------------------------------------------------
+ * Processor specific: core_sleep/core_wake/misc. notes
+ *
+ * ARM notes:
+ * FIQ is not dealt with by the scheduler code and is simply restored if it
+ * must by masked for some reason - because threading modifies a register
+ * that FIQ may also modify and there's no way to accomplish it atomically.
+ * s3c2440 is such a case.
+ *
+ * Audio interrupts are generally treated at a higher priority than others;
+ * use of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
+ * is not in general safe. Special cases may be constructed on a per-
+ * source basis and blocking operations are not available.
+ *
+ * core_sleep procedure to implement for any CPU to ensure an asynchronous
+ * wakeup never results in requiring a wait until the next tick (up to
+ * 10000uS!). May require assembly and careful instruction ordering.
*
- * 1) On multicore, stay awake if directed to do so by another. If so, goto step 4.
- * 2) If processor requires, atomically reenable interrupts and perform step 3.
- * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000 on Coldfire)
- * goto step 5.
+ * 1) On multicore, stay awake if directed to do so by another. If so, goto
+ * step 4.
+ * 2) If processor requires, atomically reenable interrupts and perform step
+ * 3.
+ * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
+ * on Coldfire) goto step 5.
* 4) Enable interrupts.
* 5) Exit procedure.
+ *
+ * core_wake and multiprocessor notes for sleep/wake coordination:
+ * If possible, to wake up another processor, the forcing of an interrupt on
+ * the woken core by the waker core is the easiest way to ensure a non-
+ * delayed wake and immediate execution of any woken threads. If that isn't
+ * available then some careful non-blocking synchronization is needed (as on
+ * PP targets at the moment).
+ *---------------------------------------------------------------------------
*/
-static inline void core_sleep(IF_COP_VOID(unsigned int core))
- __attribute__((always_inline));
-
-static void check_tmo_threads(void)
- __attribute__((noinline));
-static inline void block_thread_on_l(
- struct thread_queue *list, struct thread_entry *thread, unsigned state)
- __attribute__((always_inline));
+/* Cast to the machine pointer size, whose size could be < 4 or > 32
+ * (someday :). */
+#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
+struct core_entry cores[NUM_CORES] IBSS_ATTR;
+struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
-static inline void block_thread_on_l_no_listlock(
- struct thread_entry **list, struct thread_entry *thread, unsigned state)
- __attribute__((always_inline));
+static const char main_thread_name[] = "main";
+extern uintptr_t stackbegin[];
+extern uintptr_t stackend[];
-static inline void _block_thread_on_l(
- struct thread_queue *list, struct thread_entry *thread,
- unsigned state IF_SWCL(, const bool single))
+static inline void core_sleep(IF_COP_VOID(unsigned int core))
__attribute__((always_inline));
-IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
- struct thread_queue *list IF_SWCL(, const bool nolock))
- __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
+void check_tmo_threads(void)
+ __attribute__((noinline));
-IF_SWCL(static inline) void _block_thread(
- struct thread_queue *list IF_SWCL(, const bool nolock))
- __attribute__((IFN_SWCL(noinline) IF_SWCL(always_inline)));
+static inline void block_thread_on_l(struct thread_entry *thread, unsigned state)
+ __attribute__((always_inline));
static void add_to_list_tmo(struct thread_entry *thread)
__attribute__((noinline));
@@ -141,9 +139,6 @@ static void add_to_list_tmo(struct thread_entry *thread)
static void core_schedule_wakeup(struct thread_entry *thread)
__attribute__((noinline));
-static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
- __attribute__((always_inline));
-
#if NUM_CORES > 1
static inline void run_blocking_ops(
unsigned int core, struct thread_entry *thread)
@@ -159,10 +154,9 @@ static inline void store_context(void* addr)
static inline void load_context(const void* addr)
__attribute__((always_inline));
-void switch_thread(struct thread_entry *old)
+void switch_thread(void)
__attribute__((noinline));
-
/****************************************************************************
* Processor-specific section
*/
@@ -172,8 +166,7 @@ void switch_thread(struct thread_entry *old)
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
-static void start_thread(void) __attribute__((naked,used));
-static void start_thread(void)
+static void __attribute__((naked,used)) start_thread(void)
{
/* r0 = context */
asm volatile (
@@ -188,19 +181,18 @@ static void start_thread(void)
#endif
"mov lr, pc \n" /* Call thread function */
"bx r4 \n"
- "mov r0, #0 \n" /* remove_thread(NULL) */
- "ldr pc, =remove_thread \n"
- ".ltorg \n" /* Dump constant pool */
); /* No clobber list - new thread doesn't care */
+ thread_exit();
+ //asm volatile (".ltorg"); /* Dump constant pool */
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
* slot, and thread function pointer in context.start. See load_context for
* what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
- ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
- (thread)->context.r[1] = (unsigned int)start_thread, \
- (thread)->context.start = (void *)function; })
+ ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+ (thread)->context.r[1] = (uint32_t)start_thread, \
+ (thread)->context.start = (uint32_t)function; })
/*---------------------------------------------------------------------------
* Store non-volatile context.
@@ -232,11 +224,11 @@ static inline void load_context(const void* addr)
#if defined (CPU_PP)
#if NUM_CORES > 1
-extern int cpu_idlestackbegin[];
-extern int cpu_idlestackend[];
-extern int cop_idlestackbegin[];
-extern int cop_idlestackend[];
-static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
+extern uintptr_t cpu_idlestackbegin[];
+extern uintptr_t cpu_idlestackend[];
+extern uintptr_t cop_idlestackbegin[];
+extern uintptr_t cop_idlestackend[];
+static uintptr_t * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
{
[CPU] = cpu_idlestackbegin,
[COP] = cop_idlestackbegin
@@ -253,7 +245,7 @@ struct core_semaphores
};
static struct core_semaphores core_semaphores[NUM_CORES] NOCACHEBSS_ATTR;
-#endif
+#endif /* CONFIG_CPU == PP5002 */
#endif /* NUM_CORES */
@@ -401,15 +393,15 @@ void corelock_unlock(struct corelock *cl)
* no other core requested a wakeup for it to perform a task.
*---------------------------------------------------------------------------
*/
+#ifdef CPU_PP502x
#if NUM_CORES == 1
-/* Shared single-core build debugging version */
static inline void core_sleep(void)
{
PROC_CTL(CURRENT_CORE) = PROC_SLEEP;
nop; nop; nop;
- set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
+ set_irq_level(IRQ_ENABLED);
}
-#elif defined (CPU_PP502x)
+#else
static inline void core_sleep(unsigned int core)
{
#if 1
@@ -429,8 +421,8 @@ static inline void core_sleep(unsigned int core)
"ldr r1, [%[mbx], #0] \n"
"tst r1, r0, lsr #2 \n"
"bne 1b \n"
- "mrs r1, cpsr \n" /* Enable interrupts */
- "bic r1, r1, #0xc0 \n"
+ "mrs r1, cpsr \n" /* Enable IRQ */
+ "bic r1, r1, #0x80 \n"
"msr cpsr_c, r1 \n"
:
: [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE), [c]"r"(core)
@@ -452,11 +444,36 @@ static inline void core_sleep(unsigned int core)
/* Wait for other processor to finish wake procedure */
while (MBX_MSG_STAT & (0x1 << core));
- /* Enable IRQ, FIQ */
- set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
+ /* Enable IRQ */
+ set_irq_level(IRQ_ENABLED);
#endif /* ASM/C selection */
}
+#endif /* NUM_CORES */
#elif CONFIG_CPU == PP5002
+#if NUM_CORES == 1
+static inline void core_sleep(void)
+{
+ asm volatile (
+ /* Sleep: PP5002 crashes if the instruction that puts it to sleep is
+ * located at 0xNNNNNNN0. 4/8/C works. This sequence makes sure
+ * that the correct alternative is executed. Don't change the order
+ * of the next 4 instructions! */
+ "tst pc, #0x0c \n"
+ "mov r0, #0xca \n"
+ "strne r0, [%[ctl]] \n"
+ "streq r0, [%[ctl]] \n"
+ "nop \n" /* nop's needed because of pipeline */
+ "nop \n"
+ "nop \n"
+ "mrs r0, cpsr \n" /* Enable IRQ */
+ "bic r0, r0, #0x80 \n"
+ "msr cpsr_c, r0 \n"
+ :
+ : [ctl]"r"(&PROC_CTL(CURRENT_CORE))
+ : "r0"
+ );
+}
+#else
/* PP5002 has no mailboxes - emulate using bytes */
static inline void core_sleep(unsigned int core)
{
@@ -486,8 +503,8 @@ static inline void core_sleep(unsigned int core)
"ldrb r0, [%[sem], #0] \n"
"cmp r0, #0 \n"
"bne 1b \n"
- "mrs r0, cpsr \n" /* Enable interrupts */
- "bic r0, r0, #0xc0 \n"
+ "mrs r0, cpsr \n" /* Enable IRQ */
+ "bic r0, r0, #0x80 \n"
"msr cpsr_c, r0 \n"
:
: [sem]"r"(&core_semaphores[core]), [c]"r"(core),
@@ -512,11 +529,12 @@ static inline void core_sleep(unsigned int core)
/* Wait for other processor to finish wake procedure */
while (core_semaphores[core].intend_wake != 0);
- /* Enable IRQ, FIQ */
- set_interrupt_status(IRQ_FIQ_ENABLED, IRQ_FIQ_STATUS);
+ /* Enable IRQ */
+ set_irq_level(IRQ_ENABLED);
#endif /* ASM/C selection */
}
-#endif /* CPU type */
+#endif /* NUM_CORES */
+#endif /* PP CPU type */
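For reference (standard ARM CPSR bit layout, not spelled out in the patch): bit 7 (0x80) masks IRQ and bit 6 (0x40) masks FIQ, so the repeated change from "bic ..., #0xc0" to "bic ..., #0x80" in the sleep and wake paths above narrows them from "enable IRQ and FIQ" to "enable IRQ only", matching the set_irq_level(IRQ_ENABLED) calls in the C variants.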
/*---------------------------------------------------------------------------
* Wake another processor core that is sleeping or prevent it from doing so
@@ -553,7 +571,7 @@ void core_wake(unsigned int othercore)
"strne r1, [%[ctl], %[oc], lsl #2] \n"
"mov r1, r2, lsr #4 \n"
"str r1, [%[mbx], #8] \n" /* Done with wake procedure */
- "msr cpsr_c, r3 \n" /* Restore int status */
+ "msr cpsr_c, r3 \n" /* Restore IRQ */
:
: [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
[oc]"r"(othercore)
@@ -604,7 +622,7 @@ void core_wake(unsigned int othercore)
"strne r1, [r2, %[oc], lsl #2] \n"
"mov r1, #0 \n" /* Done with wake procedure */
"strb r1, [%[sem], #0] \n"
- "msr cpsr_c, r3 \n" /* Restore int status */
+ "msr cpsr_c, r3 \n" /* Restore IRQ */
:
: [sem]"r"(&core_semaphores[othercore]),
[st]"r"(&PROC_STAT),
@@ -640,8 +658,8 @@ void core_wake(unsigned int othercore)
*
* Needed when a thread suicides on a core other than the main CPU since the
* stack used when idling is the stack of the last thread to run. This stack
- * may not reside in the core in which case the core will continue to use a
- * stack from an unloaded module until another thread runs on it.
+ * may not reside in the core firmware, in which case the core will continue
+ * to use a stack from an unloaded module until another thread runs on it.
*---------------------------------------------------------------------------
*/
static inline void switch_to_idle_stack(const unsigned int core)
@@ -670,11 +688,11 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
/* Flush our data to ram */
flush_icache();
/* Stash thread in r4 slot */
- thread->context.r[0] = (unsigned int)thread;
+ thread->context.r[0] = (uint32_t)thread;
/* Stash restart address in r5 slot */
- thread->context.r[1] = (unsigned int)thread->context.start;
+ thread->context.r[1] = thread->context.start;
/* Save sp in context.sp while still running on old core */
- thread->context.sp = (void*)idle_stacks[core][IDLE_STACK_WORDS-1];
+ thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
/*---------------------------------------------------------------------------
@@ -689,9 +707,8 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
/*---------------------------------------------------------------------------
* This actually performs the core switch.
*/
-static void switch_thread_core(unsigned int core, struct thread_entry *thread)
- __attribute__((naked));
-static void switch_thread_core(unsigned int core, struct thread_entry *thread)
+static void __attribute__((naked))
+ switch_thread_core(unsigned int core, struct thread_entry *thread)
{
/* Pure asm for this because compiler behavior isn't sufficiently predictable.
* Stack access also isn't permitted until restoring the original stack and
@@ -705,7 +722,6 @@ static void switch_thread_core(unsigned int core, struct thread_entry *thread)
"mov sp, r2 \n" /* switch stacks */
"adr r2, 1f \n" /* r2 = new core restart address */
"str r2, [r1, #40] \n" /* thread->context.start = r2 */
- "mov r0, r1 \n" /* switch_thread(thread) */
"ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
"1: \n"
"ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
@@ -733,13 +749,15 @@ static inline void core_sleep(void)
/* FIQ also changes the CLKCON register so FIQ must be disabled
when changing it here */
asm volatile (
- "mrs r0, cpsr \n" /* Prepare IRQ, FIQ enable */
- "bic r0, r0, #0xc0 \n"
+ "mrs r0, cpsr \n"
+ "orr r2, r0, #0x40 \n" /* Disable FIQ */
+ "bic r0, r0, #0x80 \n" /* Prepare IRQ enable */
+ "msr cpsr_c, r2 \n"
"mov r1, #0x4c000000 \n" /* CLKCON = 0x4c00000c */
"ldr r2, [r1, #0xc] \n" /* Set IDLE bit */
"orr r2, r2, #4 \n"
"str r2, [r1, #0xc] \n"
- "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
+ "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
"mov r2, #0 \n" /* wait for IDLE */
"1: \n"
"add r2, r2, #1 \n"
@@ -750,13 +768,14 @@ static inline void core_sleep(void)
"ldr r2, [r1, #0xc] \n" /* Reset IDLE bit */
"bic r2, r2, #4 \n"
"str r2, [r1, #0xc] \n"
- "msr cpsr_c, r0 \n" /* Enable IRQ, FIQ */
+ "msr cpsr_c, r0 \n" /* Enable IRQ, restore FIQ */
: : : "r0", "r1", "r2");
}
#elif defined(CPU_TCC77X)
static inline void core_sleep(void)
{
#warning TODO: Implement core_sleep
+ set_irq_level(IRQ_ENABLED);
}
#elif defined(CPU_TCC780X)
static inline void core_sleep(void)
@@ -765,8 +784,8 @@ static inline void core_sleep(void)
asm volatile (
"mov r0, #0 \n"
"mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
- "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */
- "bic r0, r0, #0xc0 \n"
+ "mrs r0, cpsr \n" /* Unmask IRQ at core level */
+ "bic r0, r0, #0x80 \n"
"msr cpsr_c, r0 \n"
: : : "r0"
);
@@ -777,8 +796,8 @@ static inline void core_sleep(void)
asm volatile (
"mov r0, #0 \n"
"mcr p15, 0, r0, c7, c0, 4 \n" /* Wait for interrupt */
- "mrs r0, cpsr \n" /* Unmask IRQ/FIQ at core level */
- "bic r0, r0, #0xc0 \n"
+ "mrs r0, cpsr \n" /* Unmask IRQ at core level */
+ "bic r0, r0, #0x80 \n"
"msr cpsr_c, r0 \n"
: : : "r0"
);
@@ -787,6 +806,7 @@ static inline void core_sleep(void)
static inline void core_sleep(void)
{
#warning core_sleep not implemented, battery life will be decreased
+ set_irq_level(0);
}
#endif /* CONFIG_CPU == */
@@ -796,8 +816,7 @@ static inline void core_sleep(void)
*---------------------------------------------------------------------------
*/
void start_thread(void); /* Provide C access to ASM label */
-static void __start_thread(void) __attribute__((used));
-static void __start_thread(void)
+static void __attribute__((used)) __start_thread(void)
{
/* a0=macsr, a1=context */
asm volatile (
@@ -808,9 +827,8 @@ static void __start_thread(void)
"move.l (%a1), %a2 \n" /* Fetch thread function pointer */
"clr.l (%a1) \n" /* Mark thread running */
"jsr (%a2) \n" /* Call thread function */
- "clr.l -(%sp) \n" /* remove_thread(NULL) */
- "jsr remove_thread \n"
);
+ thread_exit();
}
/* Set EMAC unit to fractional mode with saturation for each new thread,
@@ -823,9 +841,9 @@ static void __start_thread(void)
*/
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
- (thread)->context.d[0] = (unsigned int)&(thread)->context, \
- (thread)->context.d[1] = (unsigned int)start_thread, \
- (thread)->context.start = (void *)(function); })
+ (thread)->context.d[0] = (uint32_t)&(thread)->context, \
+ (thread)->context.d[1] = (uint32_t)start_thread, \
+ (thread)->context.start = (uint32_t)(function); })
/*---------------------------------------------------------------------------
* Store non-volatile context.
@@ -874,8 +892,7 @@ static inline void core_sleep(void)
*---------------------------------------------------------------------------
*/
void start_thread(void); /* Provide C access to ASM label */
-static void __start_thread(void) __attribute__((used));
-static void __start_thread(void)
+static void __attribute__((used)) __start_thread(void)
{
/* r8 = context */
asm volatile (
@@ -885,20 +902,16 @@ static void __start_thread(void)
"mov #0, r1 \n" /* Start the thread */
"jsr @r0 \n"
"mov.l r1, @(36, r8) \n" /* Clear start address */
- "mov.l 1f, r0 \n" /* remove_thread(NULL) */
- "jmp @r0 \n"
- "mov #0, r4 \n"
- "1: \n"
- ".long _remove_thread \n"
);
+ thread_exit();
}
/* Place context pointer in r8 slot, function pointer in r9 slot, and
* start_thread pointer in context_start */
#define THREAD_STARTUP_INIT(core, thread, function) \
- ({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
- (thread)->context.r[1] = (unsigned int)(function), \
- (thread)->context.start = (void*)start_thread; })
+ ({ (thread)->context.r[0] = (uint32_t)&(thread)->context, \
+ (thread)->context.r[1] = (uint32_t)(function), \
+ (thread)->context.start = (uint32_t)start_thread; })
/*---------------------------------------------------------------------------
* Store non-volatile context.
@@ -947,7 +960,7 @@ static inline void load_context(const void* addr)
}
/*---------------------------------------------------------------------------
- * Put core in a power-saving state if waking list wasn't repopulated.
+ * Put core in a power-saving state.
*---------------------------------------------------------------------------
*/
static inline void core_sleep(void)
@@ -969,9 +982,7 @@ static inline void core_sleep(void)
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
-#if NUM_CORES > 1
- const unsigned int core = thread->core;
-#endif
+ IF_COP( const unsigned int core = thread->core; )
static char name[32];
thread_get_name(name, 32, thread);
panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
@@ -987,9 +998,7 @@ static void thread_stkov(struct thread_entry *thread)
#else
static void thread_stkov(struct thread_entry *thread)
{
-#if NUM_CORES > 1
- const unsigned int core = thread->core;
-#endif
+ IF_COP( const unsigned int core = thread->core; )
static char name[32];
thread_get_name(name, 32, thread);
panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
@@ -998,111 +1007,67 @@ static void thread_stkov(struct thread_entry *thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
-/*---------------------------------------------------------------------------
- * Lock a list pointer and returns its value
- *---------------------------------------------------------------------------
- */
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Separate locking function versions */
-
/* Thread locking */
-#define GET_THREAD_STATE(thread) \
- ({ corelock_lock(&(thread)->cl); (thread)->state; })
-#define TRY_GET_THREAD_STATE(thread) \
- ({ corelock_try_lock(&thread->cl) ? thread->state : STATE_BUSY; })
-#define UNLOCK_THREAD(thread, state) \
- ({ corelock_unlock(&(thread)->cl); })
-#define UNLOCK_THREAD_SET_STATE(thread, _state) \
- ({ (thread)->state = (_state); corelock_unlock(&(thread)->cl); })
-
-/* List locking */
-#define LOCK_LIST(tqp) \
- ({ corelock_lock(&(tqp)->cl); (tqp)->queue; })
-#define UNLOCK_LIST(tqp, mod) \
- ({ corelock_unlock(&(tqp)->cl); })
-#define UNLOCK_LIST_SET_PTR(tqp, mod) \
- ({ (tqp)->queue = (mod); corelock_unlock(&(tqp)->cl); })
-
-/* Select the queue pointer directly */
-#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
- ({ add_to_list_l(&(tqp)->queue, (thread)); })
-#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
- ({ remove_from_list_l(&(tqp)->queue, (thread)); })
-
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
-/* Native swap/exchange versions */
+#if NUM_CORES > 1
+#define LOCK_THREAD(thread) \
+ ({ corelock_lock(&(thread)->slot_cl); })
+#define TRY_LOCK_THREAD(thread) \
+ ({ corelock_try_lock(&thread->slot_cl); })
+#define UNLOCK_THREAD(thread) \
+ ({ corelock_unlock(&(thread)->slot_cl); })
+#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
+ ({ unsigned int _core = (thread)->core; \
+ cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
+ cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
+#else
+#define LOCK_THREAD(thread) \
+ ({ })
+#define TRY_LOCK_THREAD(thread) \
+ ({ })
+#define UNLOCK_THREAD(thread) \
+ ({ })
+#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
+ ({ })
+#endif
+
+/* RTR list */
+#define RTR_LOCK(core) \
+ ({ corelock_lock(&cores[core].rtr_cl); })
+#define RTR_UNLOCK(core) \
+ ({ corelock_unlock(&cores[core].rtr_cl); })
-/* Thread locking */
-#define GET_THREAD_STATE(thread) \
- ({ unsigned _s; \
- while ((_s = xchg8(&(thread)->state, STATE_BUSY)) == STATE_BUSY); \
- _s; })
-#define TRY_GET_THREAD_STATE(thread) \
- ({ xchg8(&(thread)->state, STATE_BUSY); })
-#define UNLOCK_THREAD(thread, _state) \
- ({ (thread)->state = (_state); })
-#define UNLOCK_THREAD_SET_STATE(thread, _state) \
- ({ (thread)->state = (_state); })
-
-/* List locking */
-#define LOCK_LIST(tqp) \
- ({ struct thread_entry *_l; \
- while((_l = xchgptr(&(tqp)->queue, STATE_BUSYuptr)) == STATE_BUSYuptr); \
- _l; })
-#define UNLOCK_LIST(tqp, mod) \
- ({ (tqp)->queue = (mod); })
-#define UNLOCK_LIST_SET_PTR(tqp, mod) \
- ({ (tqp)->queue = (mod); })
-
-/* Select the local queue pointer copy returned from LOCK_LIST */
-#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
- ({ add_to_list_l(&(tc), (thread)); })
-#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
- ({ remove_from_list_l(&(tc), (thread)); })
+#ifdef HAVE_PRIORITY_SCHEDULING
+#define rtr_add_entry(core, priority) \
+ prio_add_entry(&cores[core].rtr, (priority))
+#define rtr_subtract_entry(core, priority) \
+ prio_subtract_entry(&cores[core].rtr, (priority))
+
+#define rtr_move_entry(core, from, to) \
+ prio_move_entry(&cores[core].rtr, (from), (to))
#else
-/* Single-core/non-locked versions */
-
-/* Threads */
-#define GET_THREAD_STATE(thread) \
- ({ (thread)->state; })
-#define UNLOCK_THREAD(thread, _state)
-#define UNLOCK_THREAD_SET_STATE(thread, _state) \
- ({ (thread)->state = (_state); })
-
-/* Lists */
-#define LOCK_LIST(tqp) \
- ({ (tqp)->queue; })
-#define UNLOCK_LIST(tqp, mod)
-#define UNLOCK_LIST_SET_PTR(tqp, mod) \
- ({ (tqp)->queue = (mod); })
-
-/* Select the queue pointer directly */
-#define ADD_TO_LIST_L_SELECT(tc, tqp, thread) \
- ({ add_to_list_l(&(tqp)->queue, (thread)); })
-#define REMOVE_FROM_LIST_L_SELECT(tc, tqp, thread) \
- ({ remove_from_list_l(&(tqp)->queue, (thread)); })
-
-#endif /* locking selection */
+#define rtr_add_entry(core, priority)
+#define rtr_add_entry_inl(core, priority)
+#define rtr_subtract_entry(core, priority)
+#define rtr_subtract_entry_inl(core, priority)
+#define rtr_move_entry(core, from, to)
+#define rtr_move_entry_inl(core, from, to)
+#endif
-#if THREAD_EXTRA_CHECKS
/*---------------------------------------------------------------------------
- * Lock the thread slot to obtain the state and then unlock it. Waits for
- * it not to be busy. Used for debugging.
+ * Thread list structure - circular:
+ *    +------------------------------+
+ *    |                              |
+ *    +--+---+<-+---+<-+---+<-+---+<-+
+ * Head->| T |  | T |  | T |  | T |
+ *    +->+---+->+---+->+---+->+---+--+
+ *    |                              |
+ *    +------------------------------+
*---------------------------------------------------------------------------
*/
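A minimal traversal sketch (not in the patch), assuming the l.next/l.prev members of struct thread_entry as used by the list functions below:

/* Visit every thread on a circular "l"-linked list exactly once.
 * The tail's l.next wraps back to the head, so a do/while loop
 * terminates when it returns to its starting point. */
static void for_each_thread_l(struct thread_entry *head,
                              void (*fn)(struct thread_entry *))
{
    struct thread_entry *t = head;
    if (t == NULL)
        return;           /* empty list */
    do
    {
        fn(t);
        t = t->l.next;    /* follow the forward link */
    }
    while (t != head);    /* full circle reached */
}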
-static unsigned peek_thread_state(struct thread_entry *thread)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- unsigned state = GET_THREAD_STATE(thread);
- UNLOCK_THREAD(thread, state);
- set_irq_level(oldlevel);
- return state;
-}
-#endif /* THREAD_EXTRA_CHECKS */
/*---------------------------------------------------------------------------
- * Adds a thread to a list of threads using "intert last". Uses the "l"
+ * Adds a thread to a list of threads using "insert last". Uses the "l"
* links.
*---------------------------------------------------------------------------
*/
@@ -1114,44 +1079,18 @@ static void add_to_list_l(struct thread_entry **list,
if (l == NULL)
{
/* Insert into unoccupied list */
- thread->l.next = thread;
thread->l.prev = thread;
+ thread->l.next = thread;
*list = thread;
return;
}
/* Insert last */
- thread->l.next = l;
thread->l.prev = l->l.prev;
- thread->l.prev->l.next = thread;
+ thread->l.next = l;
+ l->l.prev->l.next = thread;
l->l.prev = thread;
-
- /* Insert next
- thread->l.next = l->l.next;
- thread->l.prev = l;
- thread->l.next->l.prev = thread;
- l->l.next = thread;
- */
-}
-
-/*---------------------------------------------------------------------------
- * Locks a list, adds the thread entry and unlocks the list on multicore.
- * Defined as add_to_list_l on single-core.
- *---------------------------------------------------------------------------
- */
-#if NUM_CORES > 1
-static void add_to_list_l_locked(struct thread_queue *tq,
- struct thread_entry *thread)
-{
- struct thread_entry *t = LOCK_LIST(tq);
- ADD_TO_LIST_L_SELECT(t, tq, thread);
- UNLOCK_LIST(tq, t);
- (void)t;
}
-#else
-#define add_to_list_l_locked(tq, thread) \
- add_to_list_l(&(tq)->queue, (thread))
-#endif
/*---------------------------------------------------------------------------
* Removes a thread from a list of threads. Uses the "l" links.
@@ -1180,28 +1119,20 @@ static void remove_from_list_l(struct thread_entry **list,
prev = thread->l.prev;
/* Fix links to jump over the removed entry. */
- prev->l.next = next;
next->l.prev = prev;
+ prev->l.next = next;
}
/*---------------------------------------------------------------------------
- * Locks a list, removes the thread entry and unlocks the list on multicore.
- * Defined as remove_from_list_l on single-core.
+ * Timeout list structure - circular reverse (to make "remove item" O(1)),
+ * NULL-terminated forward (to ease the far more common forward traversal):
+ *    +------------------------------+
+ *    |                              |
+ *    +--+---+<-+---+<-+---+<-+---+<-+
+ * Head->| T |  | T |  | T |  | T |
+ *       +---+->+---+->+---+->+---+-X
*---------------------------------------------------------------------------
*/
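A short sketch of the invariants this hybrid layout maintains (assumptions drawn from add_to_list_tmo() and remove_from_list_tmo() below):

/* tmo.prev == NULL marks "not on the timeout list" (the code below
 * asserts exactly this), head->tmo.prev reaches the tail in O(1)
 * through the circular reverse link, and the forward chain ends in
 * NULL rather than wrapping. */
static bool tmo_is_listed(const struct thread_entry *t)
{
    return t->tmo.prev != NULL;
}

static struct thread_entry *tmo_tail(struct thread_entry *head)
{
    return (head != NULL) ? head->tmo.prev : NULL;
}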
-#if NUM_CORES > 1
-static void remove_from_list_l_locked(struct thread_queue *tq,
- struct thread_entry *thread)
-{
- struct thread_entry *t = LOCK_LIST(tq);
- REMOVE_FROM_LIST_L_SELECT(t, tq, thread);
- UNLOCK_LIST(tq, t);
- (void)t;
-}
-#else
-#define remove_from_list_l_locked(tq, thread) \
- remove_from_list_l(&(tq)->queue, (thread))
-#endif
/*---------------------------------------------------------------------------
* Add a thread to the core's timeout list by linking the pointers in its
@@ -1210,19 +1141,24 @@ static void remove_from_list_l_locked(struct thread_queue *tq,
*/
static void add_to_list_tmo(struct thread_entry *thread)
{
- /* Insert first */
- struct thread_entry *t = cores[IF_COP_CORE(thread->core)].timeout;
+ struct thread_entry *tmo = cores[IF_COP_CORE(thread->core)].timeout;
+ THREAD_ASSERT(thread->tmo.prev == NULL,
+ "add_to_list_tmo->already listed", thread);
- thread->tmo.prev = thread;
- thread->tmo.next = t;
+ thread->tmo.next = NULL;
- if (t != NULL)
+ if (tmo == NULL)
{
- /* Fix second item's prev pointer to point to this thread */
- t->tmo.prev = thread;
+ /* Insert into unoccupied list */
+ thread->tmo.prev = thread;
+ cores[IF_COP_CORE(thread->core)].timeout = thread;
+ return;
}
- cores[IF_COP_CORE(thread->core)].timeout = thread;
+ /* Insert last */
+ thread->tmo.prev = tmo->tmo.prev;
+ tmo->tmo.prev->tmo.next = thread;
+ tmo->tmo.prev = thread;
}
/*---------------------------------------------------------------------------
@@ -1233,91 +1169,520 @@ static void add_to_list_tmo(struct thread_entry *thread)
*/
static void remove_from_list_tmo(struct thread_entry *thread)
{
+ struct thread_entry **list = &cores[IF_COP_CORE(thread->core)].timeout;
+ struct thread_entry *prev = thread->tmo.prev;
struct thread_entry *next = thread->tmo.next;
- struct thread_entry *prev;
- if (thread == cores[IF_COP_CORE(thread->core)].timeout)
+ THREAD_ASSERT(prev != NULL, "remove_from_list_tmo->not listed", thread);
+
+ if (next != NULL)
+ next->tmo.prev = prev;
+
+ if (thread == *list)
+ {
+ /* List becomes next item and empty if next == NULL */
+ *list = next;
+ /* Mark as unlisted */
+ thread->tmo.prev = NULL;
+ }
+ else
+ {
+ if (next == NULL)
+ (*list)->tmo.prev = prev;
+ prev->tmo.next = next;
+ /* Mark as unlisted */
+ thread->tmo.prev = NULL;
+ }
+}
+
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+/*---------------------------------------------------------------------------
+ * Priority distribution structure (one category for each possible priority):
+ *
+ *       +----+----+----+ ... +-----+
+ * hist: | F0 | F1 | F2 |     | F31 |
+ *       +----+----+----+ ... +-----+
+ * mask: | b0 | b1 | b2 |     | b31 |
+ *       +----+----+----+ ... +-----+
+ *
+ * F = count of threads at priority category n (frequency)
+ * b = bitmask of non-zero priority categories (occupancy)
+ *
+ *        / if F[n] != 0 : 1
+ * b[n] = |
+ *        \ else         : 0
+ *
+ *---------------------------------------------------------------------------
+ * Basic priority inheritance priotocol (PIP):
+ *
+ * Mn = mutex n, Tn = thread n
+ *
+ * A lower priority thread inherits the priority of the highest priority
+ * thread blocked waiting for it to complete an action (such as release a
+ * mutex or respond to a message via queue_send):
+ *
+ * 1) T2->M1->T1
+ *
+ * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
+ * priority than T1, then T1 inherits the priority of T2.
+ *
+ * 2) T3
+ * \/
+ * T2->M1->T1
+ *
+ * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
+ * T1 inherits the higher priority of T2 and T3.
+ *
+ * 3) T3->M2->T2->M1->T1
+ *
+ * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
+ * then T1 inherits the priority of T3 through T2.
+ *
+ * Blocking chains can grow arbitrarily complex (though it's best that they
+ * not form at all very often :) and build up from these units.
+ *---------------------------------------------------------------------------
+ */
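A hedged illustration of why the occupancy mask makes the scheduler's central query cheap; it assumes find_first_set_bit() returns the index of the least significant set bit and that numerically lower values mean higher priority, as the uses later in this diff suggest:

/* With hist[] counting threads per priority category and mask
 * keeping one bit per non-empty category, the best occupied
 * priority falls out of a single bit scan - no list walk needed. */
static int best_occupied_priority(const struct priority_distribution *pd)
{
    return find_first_set_bit(pd->mask);
}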
+
+/*---------------------------------------------------------------------------
+ * Increment frequency at category "priority"
+ *---------------------------------------------------------------------------
+ */
+static inline unsigned int prio_add_entry(
+ struct priority_distribution *pd, int priority)
+{
+ unsigned int count;
+ /* A large enough size/instruction count difference on ARM makes it worth
+ * using different code (192 bytes for ARM). The only thing better is ASM. */
+#ifdef CPU_ARM
+ count = pd->hist[priority];
+ if (++count == 1)
+ pd->mask |= 1 << priority;
+ pd->hist[priority] = count;
+#else /* This one's better for Coldfire */
+ if ((count = ++pd->hist[priority]) == 1)
+ pd->mask |= 1 << priority;
+#endif
+
+ return count;
+}
+
+/*---------------------------------------------------------------------------
+ * Decrement frequency at category "priority"
+ *---------------------------------------------------------------------------
+ */
+static inline unsigned int prio_subtract_entry(
+ struct priority_distribution *pd, int priority)
+{
+ unsigned int count;
+
+#ifdef CPU_ARM
+ count = pd->hist[priority];
+ if (--count == 0)
+ pd->mask &= ~(1 << priority);
+ pd->hist[priority] = count;
+#else
+ if ((count = --pd->hist[priority]) == 0)
+ pd->mask &= ~(1 << priority);
+#endif
+
+ return count;
+}
+
+/*---------------------------------------------------------------------------
+ * Remove from one category and add to another
+ *---------------------------------------------------------------------------
+ */
+static inline void prio_move_entry(
+ struct priority_distribution *pd, int from, int to)
+{
+ uint32_t mask = pd->mask;
+
+#ifdef CPU_ARM
+ unsigned int count;
+
+ count = pd->hist[from];
+ if (--count == 0)
+ mask &= ~(1 << from);
+ pd->hist[from] = count;
+
+ count = pd->hist[to];
+ if (++count == 1)
+ mask |= 1 << to;
+ pd->hist[to] = count;
+#else
+ if (--pd->hist[from] == 0)
+ mask &= ~(1 << from);
+
+ if (++pd->hist[to] == 1)
+ mask |= 1 << to;
+#endif
+
+ pd->mask = mask;
+}
+
+/*---------------------------------------------------------------------------
+ * Change the priority and rtr entry for a running thread
+ *---------------------------------------------------------------------------
+ */
+static inline void set_running_thread_priority(
+ struct thread_entry *thread, int priority)
+{
+ const unsigned int core = IF_COP_CORE(thread->core);
+ RTR_LOCK(core);
+ rtr_move_entry(core, thread->priority, priority);
+ thread->priority = priority;
+ RTR_UNLOCK(core);
+}
+
+/*---------------------------------------------------------------------------
+ * Finds the highest priority thread in a list of threads. If the list is
+ * empty, PRIORITY_IDLE is returned.
+ *
+ * It is possible to use the struct priority_distribution within an object
+ * instead of scanning the remaining threads in the list, but as a
+ * compromise the per-object memory overhead is avoided at the cost of a
+ * slight speed penalty under high contention.
+ *---------------------------------------------------------------------------
+ */
+static int find_highest_priority_in_list_l(
+ struct thread_entry * const thread)
+{
+ if (thread != NULL)
{
- /* Next item becomes list head */
- cores[IF_COP_CORE(thread->core)].timeout = next;
+ /* Go through the list until ending up back at the initial thread */
+ int highest_priority = thread->priority;
+ struct thread_entry *curr = thread;
- if (next != NULL)
+ do
{
- /* Fix new list head's prev to point to itself. */
- next->tmo.prev = next;
+ int priority = curr->priority;
+
+ if (priority < highest_priority)
+ highest_priority = priority;
+
+ curr = curr->l.next;
}
+ while (curr != thread);
- thread->tmo.prev = NULL;
- return;
+ return highest_priority;
}
- prev = thread->tmo.prev;
+ return PRIORITY_IDLE;
+}
- if (next != NULL)
+/*---------------------------------------------------------------------------
+ * Register priority with blocking system and bubble it down the chain if
+ * any until we reach the end or something is already equal or higher.
+ *
+ * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
+ * targets but that same action also guarantees a circular block anyway and
+ * those are prevented, right? :-)
+ *---------------------------------------------------------------------------
+ */
+static struct thread_entry *
+ blocker_inherit_priority(struct thread_entry *current)
+{
+ const int priority = current->priority;
+ struct blocker *bl = current->blocker;
+ struct thread_entry * const tstart = current;
+ struct thread_entry *bl_t = bl->thread;
+
+ /* Blocker cannot change since the object protection is held */
+ LOCK_THREAD(bl_t);
+
+ for (;;)
{
- next->tmo.prev = prev;
+ struct thread_entry *next;
+ int bl_pr = bl->priority;
+
+ if (priority >= bl_pr)
+ break; /* Object priority already high enough */
+
+ bl->priority = priority;
+
+ /* Add this one */
+ prio_add_entry(&bl_t->pdist, priority);
+
+ if (bl_pr < PRIORITY_IDLE)
+ {
+ /* Not first waiter - subtract old one */
+ prio_subtract_entry(&bl_t->pdist, bl_pr);
+ }
+
+ if (priority >= bl_t->priority)
+ break; /* Thread priority high enough */
+
+ if (bl_t->state == STATE_RUNNING)
+ {
+ /* Blocking thread is a running thread, therefore there are no
+ * further blockers. Change the "run queue" on which it
+ * resides. */
+ set_running_thread_priority(bl_t, priority);
+ break;
+ }
+
+ bl_t->priority = priority;
+
+ /* If blocking thread has a blocker, apply transitive inheritance */
+ bl = bl_t->blocker;
+
+ if (bl == NULL)
+ break; /* End of chain or object doesn't support inheritance */
+
+ next = bl->thread;
+
+ if (next == tstart)
+ break; /* Full-circle - deadlock! */
+
+ UNLOCK_THREAD(current);
+
+#if NUM_CORES > 1
+ for (;;)
+ {
+ LOCK_THREAD(next);
+
+ /* Blocker could change - retest condition */
+ if (bl->thread == next)
+ break;
+
+ UNLOCK_THREAD(next);
+ next = bl->thread;
+ }
+#endif
+ current = bl_t;
+ bl_t = next;
}
- prev->tmo.next = next;
- thread->tmo.prev = NULL;
+ UNLOCK_THREAD(bl_t);
+
+ return current;
}
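A worked instance of case 3) from the comment block above (numbers assumed; lower means higher priority): if T1 runs at priority 20, T2 blocks on M1 at 15, and T3 then blocks on M2 at 5, the loop first boosts T2 to 5 through M2's entry in T2's pdist, follows T2's blocker to M1, raises the running T1 to 5 via set_running_thread_priority(), and the STATE_RUNNING check ends the walk there.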
/*---------------------------------------------------------------------------
- * Schedules a thread wakeup on the specified core. Threads will be made
- * ready to run when the next task switch occurs. Note that this does not
- * introduce an on-core delay since the soonest the next thread may run is
- * no sooner than that. Other cores and on-core interrupts may only ever
- * add to the list.
+ * Readjust priorities when waking a thread blocked waiting for another,
+ * in essence "releasing" the thread's effect on the object owner. Can be
+ * performed from any context.
*---------------------------------------------------------------------------
*/
-static void core_schedule_wakeup(struct thread_entry *thread)
+struct thread_entry *
+ wakeup_priority_protocol_release(struct thread_entry *thread)
{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- const unsigned int core = IF_COP_CORE(thread->core);
- add_to_list_l_locked(&cores[core].waking, thread);
+ const int priority = thread->priority;
+ struct blocker *bl = thread->blocker;
+ struct thread_entry * const tstart = thread;
+ struct thread_entry *bl_t = bl->thread;
+
+ /* Blocker cannot change since object will be locked */
+ LOCK_THREAD(bl_t);
+
+ thread->blocker = NULL; /* Thread not blocked */
+
+ for (;;)
+ {
+ struct thread_entry *next;
+ int bl_pr = bl->priority;
+
+ if (priority > bl_pr)
+ break; /* Object priority higher */
+
+ next = *thread->bqp;
+
+ if (next == NULL)
+ {
+ /* No more threads in queue */
+ prio_subtract_entry(&bl_t->pdist, bl_pr);
+ bl->priority = PRIORITY_IDLE;
+ }
+ else
+ {
+ /* Check list for highest remaining priority */
+ int queue_pr = find_highest_priority_in_list_l(next);
+
+ if (queue_pr == bl_pr)
+ break; /* Object priority not changing */
+
+ /* Change queue priority */
+ prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
+ bl->priority = queue_pr;
+ }
+
+ if (bl_pr > bl_t->priority)
+ break; /* thread priority is higher */
+
+ bl_pr = find_first_set_bit(bl_t->pdist.mask);
+
+ if (bl_pr == bl_t->priority)
+ break; /* Thread priority not changing */
+
+ if (bl_t->state == STATE_RUNNING)
+ {
+ /* No further blockers */
+ set_running_thread_priority(bl_t, bl_pr);
+ break;
+ }
+
+ bl_t->priority = bl_pr;
+
+ /* If blocking thread has a blocker, apply transitive inheritance */
+ bl = bl_t->blocker;
+
+ if (bl == NULL)
+ break; /* End of chain or object doesn't support inheritance */
+
+ next = bl->thread;
+
+ if (next == tstart)
+ break; /* Full-circle - deadlock! */
+
+ UNLOCK_THREAD(thread);
+
#if NUM_CORES > 1
- if (core != CURRENT_CORE)
+ for (;;)
+ {
+ LOCK_THREAD(next);
+
+ /* Blocker could change - retest condition */
+ if (bl->thread == next)
+ break;
+
+ UNLOCK_THREAD(next);
+ next = bl->thread;
+ }
+#endif
+ thread = bl_t;
+ bl_t = next;
+ }
+
+ UNLOCK_THREAD(bl_t);
+
+#if NUM_CORES > 1
+ if (thread != tstart)
{
- core_wake(core);
+ /* Relock original if it changed */
+ LOCK_THREAD(tstart);
}
#endif
- set_irq_level(oldlevel);
+
+ return cores[CURRENT_CORE].running;
}
/*---------------------------------------------------------------------------
- * If the waking list was populated, move all threads on it onto the running
- * list so they may be run ASAP.
+ * Transfer ownership to a thread waiting for an object and transfer
+ * inherited priority boost from other waiters. This algorithm knows that
+ * blocking chains may only unblock from the very end.
+ *
+ * Only the owning thread itself may call this, so the caller is assumed
+ * to be the running thread.
*---------------------------------------------------------------------------
*/
-static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
+struct thread_entry *
+ wakeup_priority_protocol_transfer(struct thread_entry *thread)
{
- struct thread_entry *w = LOCK_LIST(&cores[IF_COP_CORE(core)].waking);
- struct thread_entry *r = cores[IF_COP_CORE(core)].running;
+ /* Waking thread inherits priority boost from object owner */
+ struct blocker *bl = thread->blocker;
+ struct thread_entry *bl_t = bl->thread;
+ struct thread_entry *next;
+ int bl_pr;
- /* Tranfer all threads on waking list to running list in one
- swoop */
- if (r != NULL)
+ THREAD_ASSERT(thread_get_current() == bl_t,
+ "UPPT->wrong thread", thread_get_current());
+
+ LOCK_THREAD(bl_t);
+
+ bl_pr = bl->priority;
+
+ /* Remove the object's boost from the owning thread */
+ if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 &&
+ bl_pr <= bl_t->priority)
{
- /* Place waking threads at the end of the running list. */
- struct thread_entry *tmp;
- w->l.prev->l.next = r;
- r->l.prev->l.next = w;
- tmp = r->l.prev;
- r->l.prev = w->l.prev;
- w->l.prev = tmp;
+ /* No more threads at this priority are waiting and the old level is
+ * at least the thread level */
+ int priority = find_first_set_bit(bl_t->pdist.mask);
+
+ if (priority != bl_t->priority)
+ {
+ /* Adjust this thread's priority */
+ set_running_thread_priority(bl_t, priority);
+ }
+ }
+
+ next = *thread->bqp;
+
+ if (next == NULL)
+ {
+ /* Expected shortcut - no more waiters */
+ bl_pr = PRIORITY_IDLE;
}
else
{
- /* Just transfer the list as-is */
- cores[IF_COP_CORE(core)].running = w;
+ if (thread->priority <= bl_pr)
+ {
+ /* Need to scan threads remaining in queue */
+ bl_pr = find_highest_priority_in_list_l(next);
+ }
+
+ if (prio_add_entry(&thread->pdist, bl_pr) == 1 &&
+ bl_pr < thread->priority)
+ {
+ /* Thread priority must be raised */
+ thread->priority = bl_pr;
+ }
+ }
+
+ bl->thread = thread; /* This thread pwns */
+ bl->priority = bl_pr; /* Save highest blocked priority */
+ thread->blocker = NULL; /* Thread not blocked */
+
+ UNLOCK_THREAD(bl_t);
+
+ return bl_t;
+}
+
+/*---------------------------------------------------------------------------
+ * No threads must be blocked waiting for this thread except for it to exit.
+ * The alternative is more elaborate cleanup and object registration code.
+ * Check this for risk of silent data corruption when objects with
+ * inheritable blocking are abandoned by the owner - not precise but may
+ * catch something.
+ *---------------------------------------------------------------------------
+ */
+void check_for_obj_waiters(const char *function, struct thread_entry *thread)
+{
+ /* Only one bit in the mask should be set, with a frequency of 1, which
+ * represents the thread's own base priority */
+ uint32_t mask = thread->pdist.mask;
+ if ((mask & (mask - 1)) != 0 ||
+ thread->pdist.hist[find_first_set_bit(mask)] > 1)
+ {
+ char name[32];
+ thread_get_name(name, 32, thread);
+ panicf("%s->%s with obj. waiters", function, name);
}
- /* Just leave any timeout threads on the timeout list. If a timeout check
- * is due, they will be removed there. If they do a timeout again before
- * being removed, they will just stay on the list with a new expiration
- * tick. */
+}
+#endif /* HAVE_PRIORITY_SCHEDULING */
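The check above leans on a standard bit trick; a small rendering with illustrative values:

/* mask & (mask - 1) clears the lowest set bit, so the result is
 * nonzero exactly when two or more priority categories are occupied:
 *   mask = 0x20 -> 0x20 & 0x1f == 0    (base priority only)
 *   mask = 0x24 -> 0x24 & 0x23 == 0x20 (an inherited boost remains) */
static bool more_than_one_bit_set(uint32_t mask)
{
    return (mask & (mask - 1)) != 0;
}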
+
+/*---------------------------------------------------------------------------
+ * Move a thread back to a running state on its core.
+ *---------------------------------------------------------------------------
+ */
+static void core_schedule_wakeup(struct thread_entry *thread)
+{
+ const unsigned int core = IF_COP_CORE(thread->core);
+
+ RTR_LOCK(core);
+
+ thread->state = STATE_RUNNING;
+
+ add_to_list_l(&cores[core].running, thread);
+ rtr_add_entry(core, thread->priority);
+
+ RTR_UNLOCK(core);
- /* Waking list is clear - NULL and unlock it */
- UNLOCK_LIST_SET_PTR(&cores[IF_COP_CORE(core)].waking, NULL);
+#if NUM_CORES > 1
+ if (core != CURRENT_CORE)
+ core_wake(core);
+#endif
}
/*---------------------------------------------------------------------------
@@ -1326,7 +1691,7 @@ static inline void core_perform_wakeup(IF_COP_VOID(unsigned int core))
* tick when the next check will occur.
*---------------------------------------------------------------------------
*/
-static void check_tmo_threads(void)
+void check_tmo_threads(void)
{
const unsigned int core = CURRENT_CORE;
const long tick = current_tick; /* snapshot the current tick */
@@ -1335,54 +1700,98 @@ static void check_tmo_threads(void)
/* If there are no processes waiting for a timeout, just keep the check
tick from falling into the past. */
- if (next != NULL)
+
+ /* Break the loop once we have walked through the list of all
+ * sleeping processes or have removed them all. */
+ while (next != NULL)
{
- /* Check sleeping threads. */
- do
- {
- /* Must make sure noone else is examining the state, wait until
- slot is no longer busy */
- struct thread_entry *curr = next;
- next = curr->tmo.next;
+ /* Check sleeping threads. Allow interrupts between checks. */
+ set_irq_level(0);
- unsigned state = GET_THREAD_STATE(curr);
+ struct thread_entry *curr = next;
- if (state < TIMEOUT_STATE_FIRST)
- {
- /* Cleanup threads no longer on a timeout but still on the
- * list. */
- remove_from_list_tmo(curr);
- UNLOCK_THREAD(curr, state); /* Unlock thread slot */
- }
- else if (TIME_BEFORE(tick, curr->tmo_tick))
+ next = curr->tmo.next;
+
+ /* Lock thread slot against explicit wakeup */
+ set_irq_level(HIGHEST_IRQ_LEVEL);
+ LOCK_THREAD(curr);
+
+ unsigned state = curr->state;
+
+ if (state < TIMEOUT_STATE_FIRST)
+ {
+ /* Cleanup threads no longer on a timeout but still on the
+ * list. */
+ remove_from_list_tmo(curr);
+ }
+ else if (TIME_BEFORE(tick, curr->tmo_tick))
+ {
+ /* Timeout still pending - this will be the usual case */
+ if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
{
- /* Timeout still pending - this will be the usual case */
- if (TIME_BEFORE(curr->tmo_tick, next_tmo_check))
- {
- /* Earliest timeout found so far - move the next check up
- to its time */
- next_tmo_check = curr->tmo_tick;
- }
- UNLOCK_THREAD(curr, state); /* Unlock thread slot */
+ /* Earliest timeout found so far - move the next check up
+ to its time */
+ next_tmo_check = curr->tmo_tick;
}
- else
+ }
+ else
+ {
+ /* Sleep timeout has been reached so bring the thread back to
+ * life again. */
+ if (state == STATE_BLOCKED_W_TMO)
{
- /* Sleep timeout has been reached so bring the thread back to
- * life again. */
- if (state == STATE_BLOCKED_W_TMO)
+#if NUM_CORES > 1
+ /* Lock the waiting thread's kernel object */
+ struct corelock *ocl = curr->obj_cl;
+
+ if (corelock_try_lock(ocl) == 0)
{
- remove_from_list_l_locked(curr->bqp, curr);
+ /* Need to retry in the correct order though the need is
+ * unlikely */
+ UNLOCK_THREAD(curr);
+ corelock_lock(ocl);
+ LOCK_THREAD(curr);
+
+ if (curr->state != STATE_BLOCKED_W_TMO)
+ {
+ /* Thread was woken or removed explicitly while slot
+ * was unlocked */
+ corelock_unlock(ocl);
+ remove_from_list_tmo(curr);
+ UNLOCK_THREAD(curr);
+ continue;
+ }
}
+#endif /* NUM_CORES */
+
+ remove_from_list_l(curr->bqp, curr);
+
+#ifdef HAVE_WAKEUP_EXT_CB
+ if (curr->wakeup_ext_cb != NULL)
+ curr->wakeup_ext_cb(curr);
+#endif
- remove_from_list_tmo(curr);
- add_to_list_l(&cores[core].running, curr);
- UNLOCK_THREAD_SET_STATE(curr, STATE_RUNNING);
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (curr->blocker != NULL)
+ wakeup_priority_protocol_release(curr);
+#endif
+ corelock_unlock(ocl);
}
+ /* else state == STATE_SLEEPING */
+
+ remove_from_list_tmo(curr);
+
+ RTR_LOCK(core);
- /* Break the loop once we have walked through the list of all
- * sleeping processes or have removed them all. */
+ curr->state = STATE_RUNNING;
+
+ add_to_list_l(&cores[core].running, curr);
+ rtr_add_entry(core, curr->priority);
+
+ RTR_UNLOCK(core);
}
- while (next != NULL);
+
+ UNLOCK_THREAD(curr);
}
cores[core].next_tmo_check = next_tmo_check;
@@ -1390,109 +1799,33 @@ static void check_tmo_threads(void)
/*---------------------------------------------------------------------------
* Performs operations that must be done before blocking a thread but after
- * the state is saved - follows reverse of locking order. blk_ops.flags is
- * assumed to be nonzero.
+ * the state is saved.
*---------------------------------------------------------------------------
*/
#if NUM_CORES > 1
static inline void run_blocking_ops(
unsigned int core, struct thread_entry *thread)
{
- struct thread_blk_ops *ops = &cores[IF_COP_CORE(core)].blk_ops;
+ struct thread_blk_ops *ops = &cores[core].blk_ops;
const unsigned flags = ops->flags;
- if (flags == 0)
+ if (flags == TBOP_CLEAR)
return;
- if (flags & TBOP_SWITCH_CORE)
+ switch (flags)
{
+ case TBOP_SWITCH_CORE:
core_switch_blk_op(core, thread);
- }
-
-#if CONFIG_CORELOCK == SW_CORELOCK
- if (flags & TBOP_UNLOCK_LIST)
- {
- UNLOCK_LIST(ops->list_p, NULL);
- }
-
- if (flags & TBOP_UNLOCK_CORELOCK)
- {
+ /* Fall-through */
+ case TBOP_UNLOCK_CORELOCK:
corelock_unlock(ops->cl_p);
- }
-
- if (flags & TBOP_UNLOCK_THREAD)
- {
- UNLOCK_THREAD(ops->thread, 0);
- }
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- /* Write updated variable value into memory location */
- switch (flags & TBOP_VAR_TYPE_MASK)
- {
- case TBOP_UNLOCK_LIST:
- UNLOCK_LIST(ops->list_p, ops->list_v);
- break;
- case TBOP_SET_VARi:
- *ops->var_ip = ops->var_iv;
- break;
- case TBOP_SET_VARu8:
- *ops->var_u8p = ops->var_u8v;
break;
}
-#endif /* CONFIG_CORELOCK == */
- /* Unlock thread's slot */
- if (flags & TBOP_UNLOCK_CURRENT)
- {
- UNLOCK_THREAD(thread, ops->state);
- }
-
- ops->flags = 0;
+ ops->flags = TBOP_CLEAR;
}
#endif /* NUM_CORES > 1 */
-
-/*---------------------------------------------------------------------------
- * Runs any operations that may cause threads to be ready to run and then
- * sleeps the processor core until the next interrupt if none are.
- *---------------------------------------------------------------------------
- */
-static inline struct thread_entry * sleep_core(IF_COP_VOID(unsigned int core))
-{
- for (;;)
- {
- set_irq_level(HIGHEST_IRQ_LEVEL);
- /* We want to do these ASAP as it may change the decision to sleep
- * the core or a core has woken because an interrupt occurred
- * and posted a message to a queue. */
- if (cores[IF_COP_CORE(core)].waking.queue != NULL)
- {
- core_perform_wakeup(IF_COP(core));
- }
-
- /* If there are threads on a timeout and the earliest wakeup is due,
- * check the list and wake any threads that need to start running
- * again. */
- if (!TIME_BEFORE(current_tick, cores[IF_COP_CORE(core)].next_tmo_check))
- {
- check_tmo_threads();
- }
-
- /* If there is a ready to run task, return its ID and keep core
- * awake. */
- if (cores[IF_COP_CORE(core)].running == NULL)
- {
- /* Enter sleep mode to reduce power usage - woken up on interrupt
- * or wakeup request from another core - expected to enable all
- * interrupts. */
- core_sleep(IF_COP(core));
- continue;
- }
-
- set_irq_level(0);
- return cores[IF_COP_CORE(core)].running;
- }
-}
-
#ifdef RB_PROFILE
void profile_thread(void)
{
@@ -1502,55 +1835,34 @@ void profile_thread(void)
/*---------------------------------------------------------------------------
* Prepares a thread to block on an object's list and/or for a specified
- * duration - expects object and slot to be appropriately locked if needed.
+ * duration - expects object and slot to be appropriately locked if needed
+ * and interrupts to be masked.
*---------------------------------------------------------------------------
*/
-static inline void _block_thread_on_l(struct thread_queue *list,
- struct thread_entry *thread,
- unsigned state
- IF_SWCL(, const bool nolock))
+static inline void block_thread_on_l(struct thread_entry *thread,
+ unsigned state)
{
/* If inlined, unreachable branches will be pruned with no size penalty
- because constant params are used for state and nolock. */
+ because state is passed as a constant parameter. */
const unsigned int core = IF_COP_CORE(thread->core);
/* Remove the thread from the list of running threads. */
+ RTR_LOCK(core);
remove_from_list_l(&cores[core].running, thread);
+ rtr_subtract_entry(core, thread->priority);
+ RTR_UNLOCK(core);
/* Add a timeout to the block if not infinite */
switch (state)
{
case STATE_BLOCKED:
- /* Put the thread into a new list of inactive threads. */
-#if CONFIG_CORELOCK == SW_CORELOCK
- if (nolock)
- {
- thread->bqp = NULL; /* Indicate nolock list */
- thread->bqnlp = (struct thread_entry **)list;
- add_to_list_l((struct thread_entry **)list, thread);
- }
- else
-#endif
- {
- thread->bqp = list;
- add_to_list_l_locked(list, thread);
- }
- break;
case STATE_BLOCKED_W_TMO:
/* Put the thread into a new list of inactive threads. */
-#if CONFIG_CORELOCK == SW_CORELOCK
- if (nolock)
- {
- thread->bqp = NULL; /* Indicate nolock list */
- thread->bqnlp = (struct thread_entry **)list;
- add_to_list_l((struct thread_entry **)list, thread);
- }
- else
-#endif
- {
- thread->bqp = list;
- add_to_list_l_locked(list, thread);
- }
+ add_to_list_l(thread->bqp, thread);
+
+ if (state == STATE_BLOCKED)
+ break;
+
/* Fall-through */
case STATE_SLEEPING:
/* If this thread times out sooner than any other thread, update
@@ -1568,35 +1880,11 @@ static inline void _block_thread_on_l(struct thread_queue *list,
break;
}
-#ifdef HAVE_PRIORITY_SCHEDULING
- /* Reset priorities */
- if (thread->priority == cores[core].highest_priority)
- cores[core].highest_priority = LOWEST_PRIORITY;
-#endif
+ /* Remember the next thread about to block. */
+ cores[core].block_task = thread;
-#if NUM_CORES == 1 || CONFIG_CORELOCK == SW_CORELOCK
- /* Safe to set state now */
+ /* Report new state. */
thread->state = state;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- cores[core].blk_ops.state = state;
-#endif
-
-#if NUM_CORES > 1
- /* Delay slot unlock until task switch */
- cores[core].blk_ops.flags |= TBOP_UNLOCK_CURRENT;
-#endif
-}
-
-static inline void block_thread_on_l(
- struct thread_queue *list, struct thread_entry *thread, unsigned state)
-{
- _block_thread_on_l(list, thread, state IF_SWCL(, false));
-}
-
-static inline void block_thread_on_l_no_listlock(
- struct thread_entry **list, struct thread_entry *thread, unsigned state)
-{
- _block_thread_on_l((struct thread_queue *)list, thread, state IF_SWCL(, true));
}
/*---------------------------------------------------------------------------
@@ -1607,72 +1895,134 @@ static inline void block_thread_on_l_no_listlock(
* INTERNAL: Intended for use by kernel and not for programs.
*---------------------------------------------------------------------------
*/
-void switch_thread(struct thread_entry *old)
+void switch_thread(void)
{
const unsigned int core = CURRENT_CORE;
+ struct thread_entry *block = cores[core].block_task;
struct thread_entry *thread = cores[core].running;
- struct thread_entry *block = old;
- if (block == NULL)
- old = thread;
+ /* Get context to save - next thread to run is unknown until all wakeups
+ * are evaluated */
+ if (block != NULL)
+ {
+ cores[core].block_task = NULL;
+
+#if NUM_CORES > 1
+ if (thread == block)
+ {
+ /* This was the last thread running and another core woke us before
+ * reaching here. Force next thread selection to give tmo threads or
+ * other threads woken before this block a first chance. */
+ block = NULL;
+ }
+ else
+#endif
+ {
+ /* Blocking task is the old one */
+ thread = block;
+ }
+ }
#ifdef RB_PROFILE
- profile_thread_stopped(old - threads);
+ profile_thread_stopped(thread - threads);
#endif
/* Begin task switching by saving our current context so that we can
* restore the state of the current thread later to the point prior
* to this call. */
- store_context(&old->context);
+ store_context(&thread->context);
/* Check if the current thread stack is overflown */
- if(((unsigned int *)old->stack)[0] != DEADBEEF)
- thread_stkov(old);
+ if (thread->stack[0] != DEADBEEF)
+ thread_stkov(thread);
#if NUM_CORES > 1
/* Run any blocking operations requested before switching/sleeping */
- run_blocking_ops(core, old);
+ run_blocking_ops(core, thread);
#endif
- /* Go through the list of sleeping task to check if we need to wake up
- * any of them due to timeout. Also puts core into sleep state until
- * there is at least one running process again. */
- thread = sleep_core(IF_COP(core));
-
#ifdef HAVE_PRIORITY_SCHEDULING
- /* Select the new task based on priorities and the last time a process
- * got CPU time. */
- if (block == NULL)
- thread = thread->l.next;
+ /* Reset the value of thread's skip count */
+ thread->skip_count = 0;
+#endif
for (;;)
{
- int priority = thread->priority;
+ /* If there are threads on a timeout and the earliest wakeup is due,
+ * check the list and wake any threads that need to start running
+ * again. */
+ if (!TIME_BEFORE(current_tick, cores[core].next_tmo_check))
+ {
+ check_tmo_threads();
+ }
+
+ set_irq_level(HIGHEST_IRQ_LEVEL);
+ RTR_LOCK(core);
- if (priority < cores[core].highest_priority)
- cores[core].highest_priority = priority;
+ thread = cores[core].running;
- if (priority == cores[core].highest_priority ||
- thread->priority_x < cores[core].highest_priority ||
- (current_tick - thread->last_run > priority * 8))
+ if (thread == NULL)
{
- cores[core].running = thread;
- break;
+ /* Enter sleep mode to reduce power usage - woken up on interrupt
+ * or wakeup request from another core - expected to enable
+ * interrupts. */
+ RTR_UNLOCK(core);
+ core_sleep(IF_COP(core));
}
+ else
+ {
+#ifdef HAVE_PRIORITY_SCHEDULING
+ /* Select the new task based on priorities and the last time a
+ * process got CPU time relative to the highest priority runnable
+ * task. */
+ struct priority_distribution *pd = &cores[core].rtr;
+ int max = find_first_set_bit(pd->mask);
- thread = thread->l.next;
- }
-
- /* Reset the value of thread's last running time to the current time. */
- thread->last_run = current_tick;
+ if (block == NULL)
+ {
+ /* Not switching on a block, tentatively select next thread */
+ thread = thread->l.next;
+ }
+
+ for (;;)
+ {
+ int priority = thread->priority;
+ int diff;
+
+ /* This ridiculously simple method of aging seems to work
+ * suspiciously well. It does tend to reward CPU hogs (threads
+ * that under-yield), which is generally not desirable at all. On
+ * the plus side, relative to other threads it penalizes excess
+ * yielding, which is good if some high priority thread is
+ * performing no useful work, such as polling for a device to be
+ * ready. Of course, aging is only employed when both higher and
+ * lower priority threads are runnable. The highest priority
+ * runnable thread(s) are never skipped. */
+ if (priority <= max ||
+ (diff = priority - max, ++thread->skip_count > diff*diff))
+ {
+ cores[core].running = thread;
+ break;
+ }
+
+ thread = thread->l.next;
+ }
#else
- if (block == NULL)
- {
- thread = thread->l.next;
- cores[core].running = thread;
- }
+ /* Without priority use a simple FCFS algorithm */
+ if (block == NULL)
+ {
+ /* Not switching on a block, select next thread */
+ thread = thread->l.next;
+ cores[core].running = thread;
+ }
#endif /* HAVE_PRIORITY_SCHEDULING */
+ RTR_UNLOCK(core);
+ set_irq_level(0);
+ break;
+ }
+ }
+
/* And finally give control to the next thread. */
load_context(&thread->context);
@@ -1682,314 +2032,210 @@ void switch_thread(struct thread_entry *old)
}
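A hedged standalone rendering of the aging test inside the scheduler loop above (names shortened, numbers illustrative):

/* Threads at the best runnable priority (priority <= max) always
 * run; one that is diff levels worse runs once it has been passed
 * over more than diff*diff times. E.g. priority 5 against max 2
 * gives diff = 3, so the thread runs at latest on its 10th
 * consideration (when ++skip_count reaches 10 > 9). */
static bool aging_says_run(int priority, int max, int *skip_count)
{
    int diff = priority - max;
    return priority <= max || ++*skip_count > diff * diff;
}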
/*---------------------------------------------------------------------------
- * Change the boost state of a thread boosting or unboosting the CPU
- * as required. Require thread slot to be locked first.
- *---------------------------------------------------------------------------
- */
-static inline void boost_thread(struct thread_entry *thread, bool boost)
-{
-#ifdef HAVE_SCHEDULER_BOOSTCTRL
- if ((thread->boosted != 0) != boost)
- {
- thread->boosted = boost;
- cpu_boost(boost);
- }
-#endif
- (void)thread; (void)boost;
-}
-
-/*---------------------------------------------------------------------------
- * Sleeps a thread for a specified number of ticks and unboost the thread if
- * if it is boosted. If ticks is zero, it does not delay but instead switches
- * tasks.
+ * Sleeps a thread for at least the specified number of ticks, with zero
+ * meaning a wait until the next tick.
*
* INTERNAL: Intended for use by kernel and not for programs.
*---------------------------------------------------------------------------
*/
void sleep_thread(int ticks)
{
- /* Get the entry for the current running thread. */
struct thread_entry *current = cores[CURRENT_CORE].running;
-#if NUM_CORES > 1
- /* Lock thread slot */
- GET_THREAD_STATE(current);
-#endif
+ LOCK_THREAD(current);
- /* Set our timeout, change lists, and finally switch threads.
- * Unlock during switch on mulicore. */
+ /* Set our timeout, remove from run list and join timeout list. */
current->tmo_tick = current_tick + ticks + 1;
- block_thread_on_l(NULL, current, STATE_SLEEPING);
- switch_thread(current);
+ block_thread_on_l(current, STATE_SLEEPING);
- /* Our status should be STATE_RUNNING */
- THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
- "S:R->!*R", current);
+ UNLOCK_THREAD(current);
}
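On the timeout arithmetic (a reading of the code, not new behavior): sleep_thread(0) sets tmo_tick = current_tick + 1, so the thread becomes eligible again on the very next tick, which is what the "at least" wording in the header comment promises.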
/*---------------------------------------------------------------------------
* Indefinitely block a thread on a blocking queue for explicit wakeup.
- * Caller with interrupt-accessible lists should disable interrupts first
- * and request a BOP_IRQ_LEVEL blocking operation to reset it.
*
* INTERNAL: Intended for use by kernel objects and not for programs.
*---------------------------------------------------------------------------
*/
-IF_SWCL(static inline) void _block_thread(struct thread_queue *list
- IF_SWCL(, const bool nolock))
+void block_thread(struct thread_entry *current)
{
- /* Get the entry for the current running thread. */
- struct thread_entry *current = cores[CURRENT_CORE].running;
-
- /* Set the state to blocked and ask the scheduler to switch tasks,
- * this takes us off of the run queue until we are explicitly woken */
+ /* Set the state to blocked and take us off of the run queue until we
+ * are explicitly woken */
+ LOCK_THREAD(current);
-#if NUM_CORES > 1
- /* Lock thread slot */
- GET_THREAD_STATE(current);
-#endif
+ /* Set the list for explicit wakeup */
+ block_thread_on_l(current, STATE_BLOCKED);
-#if CONFIG_CORELOCK == SW_CORELOCK
- /* One branch optimized away during inlining */
- if (nolock)
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (current->blocker != NULL)
{
- block_thread_on_l_no_listlock((struct thread_entry **)list,
- current, STATE_BLOCKED);
+ /* Object supports PIP */
+ current = blocker_inherit_priority(current);
}
- else
#endif
- {
- block_thread_on_l(list, current, STATE_BLOCKED);
- }
-
- switch_thread(current);
-
- /* Our status should be STATE_RUNNING */
- THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
- "B:R->!*R", current);
-}
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Inline lock/nolock version of _block_thread into these functions */
-void block_thread(struct thread_queue *tq)
-{
- _block_thread(tq, false);
-}
-void block_thread_no_listlock(struct thread_entry **list)
-{
- _block_thread((struct thread_queue *)list, true);
+ UNLOCK_THREAD(current);
}
-#endif /* CONFIG_CORELOCK */
/*---------------------------------------------------------------------------
* Block a thread on a blocking queue for a specified time interval or until
* explicitly woken - whichever happens first.
- * Caller with interrupt-accessible lists should disable interrupts first
- * and request that interrupt level be restored after switching out the
- * current thread.
*
* INTERNAL: Intended for use by kernel objects and not for programs.
*---------------------------------------------------------------------------
*/
-void block_thread_w_tmo(struct thread_queue *list, int timeout)
+void block_thread_w_tmo(struct thread_entry *current, int timeout)
{
/* Get the entry for the current running thread. */
- struct thread_entry *current = cores[CURRENT_CORE].running;
-
-#if NUM_CORES > 1
- /* Lock thread slot */
- GET_THREAD_STATE(current);
-#endif
+ LOCK_THREAD(current);
/* Set the state to blocked with the specified timeout */
current->tmo_tick = current_tick + timeout;
+
/* Set the list for explicit wakeup */
- block_thread_on_l(list, current, STATE_BLOCKED_W_TMO);
+ block_thread_on_l(current, STATE_BLOCKED_W_TMO);
- /* Now force a task switch and block until we have been woken up
- * by another thread or timeout is reached - whichever happens first */
- switch_thread(current);
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (current->blocker != NULL)
+ {
+ /* Object supports PIP */
+ current = blocker_inherit_priority(current);
+ }
+#endif
- /* Our status should be STATE_RUNNING */
- THREAD_ASSERT(peek_thread_state(current) == STATE_RUNNING,
- "T:R->!*R", current);
+ UNLOCK_THREAD(current);
}
/*---------------------------------------------------------------------------
- * Explicitly wakeup a thread on a blocking queue. Has no effect on threads
- * that called sleep().
- * Caller with interrupt-accessible lists should disable interrupts first.
- * This code should be considered a critical section by the caller.
+ * Explicitly wake up a thread on a blocking queue. Only affects threads of
+ * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
+ *
+ * This code should be considered a critical section by the caller, meaning
+ * that the object's corelock should be held.
*
* INTERNAL: Intended for use by kernel objects and not for programs.
*---------------------------------------------------------------------------
*/
-IF_SWCL(static inline) struct thread_entry * _wakeup_thread(
- struct thread_queue *list IF_SWCL(, const bool nolock))
+unsigned int wakeup_thread(struct thread_entry **list)
{
- struct thread_entry *t;
- struct thread_entry *thread;
- unsigned state;
-
- /* Wake up the last thread first. */
-#if CONFIG_CORELOCK == SW_CORELOCK
- /* One branch optimized away during inlining */
- if (nolock)
- {
- t = list->queue;
- }
- else
-#endif
- {
- t = LOCK_LIST(list);
- }
+ struct thread_entry *thread = *list;
+ unsigned int result = THREAD_NONE;
/* Check if there is a blocked thread at all. */
- if (t == NULL)
- {
-#if CONFIG_CORELOCK == SW_CORELOCK
- if (!nolock)
-#endif
- {
- UNLOCK_LIST(list, NULL);
- }
- return NULL;
- }
+ if (thread == NULL)
+ return result;
- thread = t;
-
-#if NUM_CORES > 1
-#if CONFIG_CORELOCK == SW_CORELOCK
- if (nolock)
- {
- /* Lock thread only, not list */
- state = GET_THREAD_STATE(thread);
- }
- else
-#endif
- {
- /* This locks in reverse order from other routines so a retry in the
- correct order may be needed */
- state = TRY_GET_THREAD_STATE(thread);
- if (state == STATE_BUSY)
- {
- /* Unlock list and retry slot, then list */
- UNLOCK_LIST(list, t);
- state = GET_THREAD_STATE(thread);
- t = LOCK_LIST(list);
- /* Be sure thread still exists here - it couldn't have re-added
- itself if it was woken elsewhere because this function is
- serialized within the object that owns the list. */
- if (thread != t)
- {
- /* Thread disappeared :( */
- UNLOCK_LIST(list, t);
- UNLOCK_THREAD(thread, state);
- return THREAD_WAKEUP_MISSING; /* Indicate disappearance */
- }
- }
- }
-#else /* NUM_CORES == 1 */
- state = GET_THREAD_STATE(thread);
-#endif /* NUM_CORES */
+ LOCK_THREAD(thread);
/* Determine thread's current state. */
- switch (state)
+ switch (thread->state)
{
case STATE_BLOCKED:
case STATE_BLOCKED_W_TMO:
- /* Remove thread from object's blocked list - select t or list depending
- on locking type at compile time */
- REMOVE_FROM_LIST_L_SELECT(t, list, thread);
-#if CONFIG_CORELOCK == SW_CORELOCK
- /* Statment optimized away during inlining if nolock != false */
- if (!nolock)
-#endif
+ remove_from_list_l(list, thread);
+
+ result = THREAD_OK;
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ struct thread_entry *current;
+ struct blocker *bl = thread->blocker;
+
+ if (bl == NULL)
{
- UNLOCK_LIST(list, t); /* Unlock list - removal complete */
+ /* No inheritance - just boost the thread by aging */
+ thread->skip_count = thread->priority;
+ current = cores[CURRENT_CORE].running;
+ }
+ else
+ {
+ /* Call the specified unblocking PIP */
+ current = bl->wakeup_protocol(thread);
}
-#ifdef HAVE_PRIORITY_SCHEDULING
- /* Give the task a kick to avoid a stall after wakeup.
- Not really proper treatment - TODO later. */
- thread->last_run = current_tick - 8*LOWEST_PRIORITY;
-#endif
+ if (current != NULL && thread->priority < current->priority
+ IF_COP( && thread->core == current->core ))
+ {
+ /* Woken thread is higher priority and exists on the same CPU core;
+ * recommend a task switch. Knowing if this is an interrupt call
+ * would be helpful here. */
+ result |= THREAD_SWITCH;
+ }
+#endif /* HAVE_PRIORITY_SCHEDULING */
+
core_schedule_wakeup(thread);
- UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
- return thread;
- default:
- /* Nothing to do. State is not blocked. */
+ break;
+
+ /* Nothing to do. State is not blocked. */
#if THREAD_EXTRA_CHECKS
+ default:
THREAD_PANICF("wakeup_thread->block invalid", thread);
case STATE_RUNNING:
case STATE_KILLED:
+ break;
#endif
-#if CONFIG_CORELOCK == SW_CORELOCK
- /* Statement optimized away during inlining if nolock != false */
- if (!nolock)
-#endif
- {
- UNLOCK_LIST(list, t); /* Unlock the object's list */
- }
- UNLOCK_THREAD(thread, state); /* Unlock thread slot */
- return NULL;
}
-}
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Inline lock/nolock version of _wakeup_thread into these functions */
-struct thread_entry * wakeup_thread(struct thread_queue *tq)
-{
- return _wakeup_thread(tq, false);
+ UNLOCK_THREAD(thread);
+ return result;
}
-struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
+/*---------------------------------------------------------------------------
+ * Wake up an entire queue of threads - returns the bitwise OR of the return
+ * bitmask from each operation, or THREAD_NONE if nothing was awakened. The
+ * object owning the queue must be locked first.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
+ *---------------------------------------------------------------------------
+ */
+unsigned int thread_queue_wake(struct thread_entry **list)
{
- return _wakeup_thread((struct thread_queue *)list, true);
+ unsigned result = THREAD_NONE;
+
+ for (;;)
+ {
+ unsigned int rc = wakeup_thread(list);
+
+ if (rc == THREAD_NONE)
+ break; /* No more threads */
+
+ result |= rc;
+ }
+
+ return result;
}
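A sketch of how a caller can honor the returned bitmask; the object layout is again hypothetical, but THREAD_OK and THREAD_SWITCH are the flags set by wakeup_thread() above:

/* Sketch: signal one waiter and yield if the scheduler recommends it */
void example_obj_signal(struct example_obj *obj)
{
    unsigned int result;

    corelock_lock(&obj->cl);      /* Object owning the queue must be locked */
    result = wakeup_thread(&obj->queue);
    corelock_unlock(&obj->cl);

    if (result & THREAD_SWITCH)
        switch_thread();          /* A higher-priority thread became ready */
}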
-#endif /* CONFIG_CORELOCK */
/*---------------------------------------------------------------------------
* Find an empty thread slot or MAXTHREADS if none found. The slot returned
* will be locked on multicore.
*---------------------------------------------------------------------------
*/
-static int find_empty_thread_slot(void)
+static struct thread_entry * find_empty_thread_slot(void)
{
-#if NUM_CORES > 1
- /* Any slot could be on an IRQ-accessible list */
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-#endif
- /* Thread slots are not locked on single core */
-
+ /* Any slot could be on an interrupt-accessible list */
+ IF_COP( int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); )
+ struct thread_entry *thread = NULL;
int n;
for (n = 0; n < MAXTHREADS; n++)
{
/* Obtain current slot state - lock it on multicore */
- unsigned state = GET_THREAD_STATE(&threads[n]);
+ struct thread_entry *t = &threads[n];
+ LOCK_THREAD(t);
- if (state == STATE_KILLED
-#if NUM_CORES > 1
- && threads[n].name != THREAD_DESTRUCT
-#endif
- )
+ if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
{
/* Slot is empty - leave it locked and caller will unlock */
+ thread = t;
break;
}
/* Finished examining slot - no longer busy - unlock on multicore */
- UNLOCK_THREAD(&threads[n], state);
+ UNLOCK_THREAD(t);
}
-#if NUM_CORES > 1
- set_irq_level(oldlevel); /* Reenable interrups - this slot is
- not accesible to them yet */
-#endif
-
- return n;
+    IF_COP( set_irq_level(oldlevel); ) /* Reenable interrupts - this slot is
+                                          not accessible to them yet */
+ return thread;
}
@@ -2000,65 +2246,68 @@ static int find_empty_thread_slot(void)
*/
void core_idle(void)
{
-#if NUM_CORES > 1
- const unsigned int core = CURRENT_CORE;
-#endif
+ IF_COP( const unsigned int core = CURRENT_CORE; )
set_irq_level(HIGHEST_IRQ_LEVEL);
core_sleep(IF_COP(core));
}
/*---------------------------------------------------------------------------
- * Create a thread
- * If using a dual core architecture, specify which core to start the thread
- * on, and whether to fall back to the other core if it can't be created
+ * Create a thread. If using a dual core architecture, specify which core to
+ * start the thread on.
+ *
* Return ID if context area could be allocated, else NULL.
*---------------------------------------------------------------------------
*/
struct thread_entry*
- create_thread(void (*function)(void), void* stack, int stack_size,
+ create_thread(void (*function)(void), void* stack, size_t stack_size,
unsigned flags, const char *name
IF_PRIO(, int priority)
IF_COP(, unsigned int core))
{
unsigned int i;
- unsigned int stacklen;
- unsigned int *stackptr;
- int slot;
+ unsigned int stack_words;
+ uintptr_t stackptr, stackend;
struct thread_entry *thread;
unsigned state;
+ int oldlevel;
- slot = find_empty_thread_slot();
- if (slot >= MAXTHREADS)
+ thread = find_empty_thread_slot();
+ if (thread == NULL)
{
return NULL;
}
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
/* Munge the stack to make it easy to spot stack overflows */
- stacklen = stack_size / sizeof(int);
- stackptr = stack;
- for(i = 0;i < stacklen;i++)
+ stackptr = ALIGN_UP((uintptr_t)stack, sizeof (uintptr_t));
+ stackend = ALIGN_DOWN((uintptr_t)stack + stack_size, sizeof (uintptr_t));
+ stack_size = stackend - stackptr;
+ stack_words = stack_size / sizeof (uintptr_t);
+
+ for (i = 0; i < stack_words; i++)
{
- stackptr[i] = DEADBEEF;
+ ((uintptr_t *)stackptr)[i] = DEADBEEF;
}
/* Store interesting information */
- thread = &threads[slot];
thread->name = name;
- thread->stack = stack;
+ thread->stack = (uintptr_t *)stackptr;
thread->stack_size = stack_size;
- thread->bqp = NULL;
-#if CONFIG_CORELOCK == SW_CORELOCK
- thread->bqnlp = NULL;
-#endif
thread->queue = NULL;
+#ifdef HAVE_WAKEUP_EXT_CB
+ thread->wakeup_ext_cb = NULL;
+#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
- thread->boosted = 0;
+ thread->cpu_boost = 0;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
- thread->priority_x = LOWEST_PRIORITY;
+ memset(&thread->pdist, 0, sizeof(thread->pdist));
+ thread->blocker = NULL;
+ thread->base_priority = priority;
thread->priority = priority;
- thread->last_run = current_tick - priority * 8;
- cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
+ thread->skip_count = priority;
+ prio_add_entry(&thread->pdist, priority);
#endif
#if NUM_CORES > 1
@@ -2077,70 +2326,160 @@ struct thread_entry*
state = (flags & CREATE_THREAD_FROZEN) ?
STATE_FROZEN : STATE_RUNNING;
- /* Align stack to an even 32 bit boundary */
- thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
+ thread->context.sp = (typeof (thread->context.sp))stackend;
/* Load the thread's context structure with needed startup information */
THREAD_STARTUP_INIT(core, thread, function);
+ thread->state = state;
+
if (state == STATE_RUNNING)
- {
-#if NUM_CORES > 1
- if (core != CURRENT_CORE)
- {
- /* Next task switch on other core moves thread to running list */
- core_schedule_wakeup(thread);
- }
- else
-#endif
- {
- /* Place on running list immediately */
- add_to_list_l(&cores[IF_COP_CORE(core)].running, thread);
- }
- }
+ core_schedule_wakeup(thread);
+
+ UNLOCK_THREAD(thread);
+
+ set_irq_level(oldlevel);
- /* remove lock and set state */
- UNLOCK_THREAD_SET_STATE(thread, state);
-
return thread;
}
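A usage sketch of the revised interface - stack_size is now a size_t and the old fall-back-core flag is gone. The stack size and priority chosen here are illustrative assumptions only:

static long example_stack[256]; /* Size is a placeholder */

static void example_thread(void)
{
    /* ... worker body ... */
    thread_exit(); /* Clean termination; wakes any thread_wait() callers */
}

void example_start(void)
{
    struct thread_entry *t =
        create_thread(example_thread, example_stack, sizeof(example_stack),
                      CREATE_THREAD_FROZEN, "example"
                      IF_PRIO(, PRIORITY_USER_INTERFACE)
                      IF_COP(, CPU));

    if (t != NULL)
        thread_thaw(t); /* Frozen threads run only after thawing */
}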
#ifdef HAVE_SCHEDULER_BOOSTCTRL
+/*---------------------------------------------------------------------------
+ * Change the boost state of a thread, boosting or unboosting the CPU
+ * as required.
+ *---------------------------------------------------------------------------
+ */
+static inline void boost_thread(struct thread_entry *thread, bool boost)
+{
+ if ((thread->cpu_boost != 0) != boost)
+ {
+ thread->cpu_boost = boost;
+ cpu_boost(boost);
+ }
+}
+
void trigger_cpu_boost(void)
{
- /* No IRQ disable nescessary since the current thread cannot be blocked
- on an IRQ-accessible list */
struct thread_entry *current = cores[CURRENT_CORE].running;
- unsigned state;
-
- state = GET_THREAD_STATE(current);
boost_thread(current, true);
- UNLOCK_THREAD(current, state);
-
- (void)state;
}
void cancel_cpu_boost(void)
{
struct thread_entry *current = cores[CURRENT_CORE].running;
- unsigned state;
-
- state = GET_THREAD_STATE(current);
boost_thread(current, false);
- UNLOCK_THREAD(current, state);
-
- (void)state;
}
#endif /* HAVE_SCHEDULER_BOOSTCTRL */
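Since boost state is now a simple per-thread flag with no slot locking, the caller's pattern is unchanged - a sketch:

/* Sketch: boost the CPU around a burst of time-critical work */
void example_burst(void)
{
    trigger_cpu_boost(); /* Sets this thread's cpu_boost, boosts the CPU */

    /* ... processing that needs the higher clock ... */

    cancel_cpu_boost();  /* Clears the flag; unboosts if it was set */
}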
/*---------------------------------------------------------------------------
- * Remove a thread from the scheduler.
+ * Block the current thread until another thread terminates. A thread may
+ * wait on itself to terminate, which prevents it from running again; it
+ * will then need to be killed externally.
+ * Parameter is the ID as returned from create_thread().
+ *---------------------------------------------------------------------------
+ */
+void thread_wait(struct thread_entry *thread)
+{
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+
+ if (thread == NULL)
+ thread = current;
+
+ /* Lock thread-as-waitable-object lock */
+ corelock_lock(&thread->waiter_cl);
+
+ /* Be sure it hasn't been killed yet */
+ if (thread->state != STATE_KILLED)
+ {
+ IF_COP( current->obj_cl = &thread->waiter_cl; )
+ current->bqp = &thread->queue;
+
+ set_irq_level(HIGHEST_IRQ_LEVEL);
+ block_thread(current);
+
+ corelock_unlock(&thread->waiter_cl);
+
+ switch_thread();
+ return;
+ }
+
+ corelock_unlock(&thread->waiter_cl);
+}
+
+/*---------------------------------------------------------------------------
+ * Exit the current thread. The Right Way to Do Things (TM).
+ *---------------------------------------------------------------------------
+ */
+void thread_exit(void)
+{
+ const unsigned int core = CURRENT_CORE;
+ struct thread_entry *current = cores[core].running;
+
+ /* Cancel CPU boost if any */
+ cancel_cpu_boost();
+
+ set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ corelock_lock(&current->waiter_cl);
+ LOCK_THREAD(current);
+
+#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
+ if (current->name == THREAD_DESTRUCT)
+ {
+ /* Thread being killed - become a waiter */
+ UNLOCK_THREAD(current);
+ corelock_unlock(&current->waiter_cl);
+ thread_wait(current);
+ THREAD_PANICF("thread_exit->WK:*R", current);
+ }
+#endif
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ check_for_obj_waiters("thread_exit", current);
+#endif
+
+ if (current->tmo.prev != NULL)
+ {
+ /* Cancel pending timeout list removal */
+ remove_from_list_tmo(current);
+ }
+
+ /* Switch tasks and never return */
+ block_thread_on_l(current, STATE_KILLED);
+
+#if NUM_CORES > 1
+ /* Switch to the idle stack if not on the main core (where "main"
+ * runs) - we can hope gcc doesn't need the old stack beyond this
+ * point. */
+ if (core != CPU)
+ {
+ switch_to_idle_stack(core);
+ }
+
+ flush_icache();
+#endif
+ current->name = NULL;
+
+ /* Signal this thread */
+ thread_queue_wake(&current->queue);
+ corelock_unlock(&current->waiter_cl);
+ /* Slot must be unusable until thread is really gone */
+ UNLOCK_THREAD_AT_TASK_SWITCH(current);
+ switch_thread();
+ /* This should never and must never be reached - if it is, the
+ * state is corrupted */
+ THREAD_PANICF("thread_exit->K:*R", current);
+}
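Together with thread_wait() above this gives a join-style idiom - a sketch:

/* Sketch: block until a worker calls thread_exit(), which wakes
 * everything queued on the worker's own thread queue */
void example_join(struct thread_entry *worker)
{
    thread_wait(worker); /* Returns once the worker reaches STATE_KILLED */
}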
+
+#ifdef ALLOW_REMOVE_THREAD
+/*---------------------------------------------------------------------------
+ * Remove a thread from the scheduler. Not The Right Way to Do Things in
+ * normal programs.
+ *
* Parameter is the ID as returned from create_thread().
*
* Use with care on threads that are not under careful control as this may
- * leave various objects in an undefined state. When trying to kill a thread
- * on another processor, be sure you know what it's doing and won't be
- * switching around itself.
+ * leave various objects in an undefined state.
*---------------------------------------------------------------------------
*/
void remove_thread(struct thread_entry *thread)
@@ -2149,17 +2488,27 @@ void remove_thread(struct thread_entry *thread)
/* core is not constant here because of core switching */
unsigned int core = CURRENT_CORE;
unsigned int old_core = NUM_CORES;
+ struct corelock *ocl = NULL;
#else
const unsigned int core = CURRENT_CORE;
#endif
+ struct thread_entry *current = cores[core].running;
+
unsigned state;
int oldlevel;
if (thread == NULL)
- thread = cores[core].running;
+ thread = current;
+
+ if (thread == current)
+ thread_exit(); /* Current thread - do normal exit */
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- state = GET_THREAD_STATE(thread);
+
+ corelock_lock(&thread->waiter_cl);
+ LOCK_THREAD(thread);
+
+ state = thread->state;
if (state == STATE_KILLED)
{
@@ -2167,50 +2516,49 @@ void remove_thread(struct thread_entry *thread)
}
#if NUM_CORES > 1
+ if (thread->name == THREAD_DESTRUCT)
+ {
+ /* Thread being killed - become a waiter */
+ UNLOCK_THREAD(thread);
+ corelock_unlock(&thread->waiter_cl);
+ set_irq_level(oldlevel);
+ thread_wait(thread);
+ return;
+ }
+
+ thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ check_for_obj_waiters("remove_thread", thread);
+#endif
+
if (thread->core != core)
{
/* Switch cores and safely extract the thread there */
- /* Slot HAS to be unlocked or a deadlock could occur - potential livelock
- condition if the thread runs away to another processor. */
+        /* Slot HAS to be unlocked or a deadlock could occur, which means
+         * other threads have to be guided into becoming thread waiters if
+         * they attempt to remove it. */
unsigned int new_core = thread->core;
- const char *old_name = thread->name;
- thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
- UNLOCK_THREAD(thread, state);
+ corelock_unlock(&thread->waiter_cl);
+
+ UNLOCK_THREAD(thread);
set_irq_level(oldlevel);
old_core = switch_core(new_core);
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- state = GET_THREAD_STATE(thread);
-
- core = new_core;
-
- if (state == STATE_KILLED)
- {
- /* Thread suicided before we could kill it */
- goto thread_killed;
- }
-
- /* Reopen slot - it's locked again anyway */
- thread->name = old_name;
- if (thread->core != core)
- {
- /* We won't play thread tag - just forget it */
- UNLOCK_THREAD(thread, state);
- set_irq_level(oldlevel);
- goto thread_kill_abort;
- }
+ corelock_lock(&thread->waiter_cl);
+ LOCK_THREAD(thread);
+ state = thread->state;
+ core = new_core;
/* Perform the extraction and switch ourselves back to the original
processor */
}
#endif /* NUM_CORES > 1 */
-#ifdef HAVE_PRIORITY_SCHEDULING
- cores[IF_COP_CORE(core)].highest_priority = LOWEST_PRIORITY;
-#endif
if (thread->tmo.prev != NULL)
{
/* Clean thread off the timeout list if a timeout check hasn't
@@ -2218,87 +2566,86 @@ void remove_thread(struct thread_entry *thread)
remove_from_list_tmo(thread);
}
+#ifdef HAVE_SCHEDULER_BOOSTCTRL
+ /* Cancel CPU boost if any */
boost_thread(thread, false);
-
- if (thread == cores[core].running)
- {
- /* Suicide - thread has unconditional rights to do this */
- /* Maintain locks until switch-out */
- block_thread_on_l(NULL, thread, STATE_KILLED);
-
-#if NUM_CORES > 1
- /* Switch to the idle stack if not on the main core (where "main"
- * runs) */
- if (core != CPU)
- {
- switch_to_idle_stack(core);
- }
-
- flush_icache();
#endif
- /* Signal this thread */
- thread_queue_wake_no_listlock(&thread->queue);
- /* Switch tasks and never return */
- switch_thread(thread);
- /* This should never and must never be reached - if it is, the
- * state is corrupted */
- THREAD_PANICF("remove_thread->K:*R", thread);
- }
-#if NUM_CORES > 1
- if (thread->name == THREAD_DESTRUCT)
- {
- /* Another core is doing this operation already */
- UNLOCK_THREAD(thread, state);
- set_irq_level(oldlevel);
- return;
- }
-#endif
- if (cores[core].waking.queue != NULL)
- {
- /* Get any threads off the waking list and onto the running
- * list first - waking and running cannot be distinguished by
- * state */
- core_perform_wakeup(IF_COP(core));
- }
+IF_COP( retry_state: )
switch (state)
{
case STATE_RUNNING:
+ RTR_LOCK(core);
/* Remove thread from ready to run tasks */
remove_from_list_l(&cores[core].running, thread);
+ rtr_subtract_entry(core, thread->priority);
+ RTR_UNLOCK(core);
break;
case STATE_BLOCKED:
case STATE_BLOCKED_W_TMO:
/* Remove thread from the queue it's blocked on - including its
* own if waiting there */
-#if CONFIG_CORELOCK == SW_CORELOCK
- /* One or the other will be valid */
- if (thread->bqp == NULL)
+#if NUM_CORES > 1
+ if (&thread->waiter_cl != thread->obj_cl)
{
- remove_from_list_l(thread->bqnlp, thread);
+ ocl = thread->obj_cl;
+
+ if (corelock_try_lock(ocl) == 0)
+ {
+ UNLOCK_THREAD(thread);
+ corelock_lock(ocl);
+ LOCK_THREAD(thread);
+
+ if (thread->state != state)
+ {
+ /* Something woke the thread */
+ state = thread->state;
+ corelock_unlock(ocl);
+ goto retry_state;
+ }
+ }
}
- else
-#endif /* CONFIG_CORELOCK */
+#endif
+ remove_from_list_l(thread->bqp, thread);
+
+#ifdef HAVE_WAKEUP_EXT_CB
+ if (thread->wakeup_ext_cb != NULL)
+ thread->wakeup_ext_cb(thread);
+#endif
+
+#ifdef HAVE_PRIORITY_SCHEDULING
+ if (thread->blocker != NULL)
{
- remove_from_list_l_locked(thread->bqp, thread);
+ /* Remove thread's priority influence from its chain */
+ wakeup_priority_protocol_release(thread);
}
+#endif
+
+#if NUM_CORES > 1
+ if (ocl != NULL)
+ corelock_unlock(ocl);
+#endif
break;
- /* Otherwise thread is killed or is frozen and hasn't run yet */
+ /* Otherwise thread is frozen and hasn't run yet */
}
+ thread->state = STATE_KILLED;
+
/* If thread was waiting on itself, it will have been removed above.
* The wrong order would result in waking the thread first and deadlocking
* since the slot is already locked. */
- thread_queue_wake_no_listlock(&thread->queue);
+ thread_queue_wake(&thread->queue);
+
+ thread->name = NULL;
thread_killed: /* Thread was already killed */
- /* Removal complete - safe to unlock state and reenable interrupts */
- UNLOCK_THREAD_SET_STATE(thread, STATE_KILLED);
+ /* Removal complete - safe to unlock and reenable interrupts */
+ corelock_unlock(&thread->waiter_cl);
+ UNLOCK_THREAD(thread);
set_irq_level(oldlevel);
#if NUM_CORES > 1
-thread_kill_abort: /* Something stopped us from killing the thread */
if (old_core < NUM_CORES)
{
/* Did a removal on another processor's thread - switch back to
@@ -2307,114 +2654,147 @@ thread_kill_abort: /* Something stopped us from killing the thread */
}
#endif
}
+#endif /* ALLOW_REMOVE_THREAD */
+#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
- * Block the current thread until another thread terminates. A thread may
- * wait on itself to terminate which prevents it from running again and it
- * will need to be killed externally.
- * Parameter is the ID as returned from create_thread().
+ * Sets the thread's relative base priority for the core it runs on. Any
+ * needed inheritance changes may also happen.
*---------------------------------------------------------------------------
*/
-void thread_wait(struct thread_entry *thread)
+int thread_set_priority(struct thread_entry *thread, int priority)
{
- const unsigned int core = CURRENT_CORE;
- struct thread_entry *current = cores[core].running;
- unsigned thread_state;
-#if NUM_CORES > 1
- int oldlevel;
- unsigned current_state;
-#endif
+ int old_base_priority = -1;
+
+ /* A little safety measure */
+ if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
+ return -1;
if (thread == NULL)
- thread = current;
+ thread = cores[CURRENT_CORE].running;
-#if NUM_CORES > 1
- oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-#endif
+ /* Thread could be on any list and therefore on an interrupt accessible
+ one - disable interrupts */
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- thread_state = GET_THREAD_STATE(thread);
+ LOCK_THREAD(thread);
-#if NUM_CORES > 1
- /* We can't lock the same slot twice. The waitee will also lock itself
- first then the thread slots that will be locked and woken in turn.
- The same order must be observed here as well. */
- if (thread == current)
- {
- current_state = thread_state;
- }
- else
+ /* Make sure it's not killed */
+ if (thread->state != STATE_KILLED)
{
- current_state = GET_THREAD_STATE(current);
- }
-#endif
+ int old_priority = thread->priority;
- if (thread_state != STATE_KILLED)
- {
- /* Unlock the waitee state at task switch - not done for self-wait
- because the would double-unlock the state and potentially
- corrupt another's busy assert on the slot */
- if (thread != current)
+ old_base_priority = thread->base_priority;
+ thread->base_priority = priority;
+
+ prio_move_entry(&thread->pdist, old_base_priority, priority);
+ priority = find_first_set_bit(thread->pdist.mask);
+
+ if (old_priority == priority)
{
-#if CONFIG_CORELOCK == SW_CORELOCK
- cores[core].blk_ops.flags |= TBOP_UNLOCK_THREAD;
- cores[core].blk_ops.thread = thread;
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
- cores[core].blk_ops.flags |= TBOP_SET_VARu8;
- cores[core].blk_ops.var_u8p = &thread->state;
- cores[core].blk_ops.var_u8v = thread_state;
-#endif
+ /* No priority change - do nothing */
}
- block_thread_on_l_no_listlock(&thread->queue, current, STATE_BLOCKED);
- switch_thread(current);
- return;
- }
+ else if (thread->state == STATE_RUNNING)
+ {
+ /* This thread is running - change location on the run
+ * queue. No transitive inheritance needed. */
+ set_running_thread_priority(thread, priority);
+ }
+ else
+ {
+ thread->priority = priority;
+
+ if (thread->blocker != NULL)
+ {
+ /* Bubble new priority down the chain */
+ struct blocker *bl = thread->blocker; /* Blocker struct */
+ struct thread_entry *bl_t = bl->thread; /* Blocking thread */
+ struct thread_entry * const tstart = thread; /* Initial thread */
+ const int highest = MIN(priority, old_priority); /* Higher of new or old */
- /* Unlock both slots - obviously the current thread can't have
- STATE_KILLED so the above if clause will always catch a thread
- waiting on itself */
+ for (;;)
+ {
+ struct thread_entry *next; /* Next thread to check */
+ int bl_pr; /* Highest blocked thread */
+ int queue_pr; /* New highest blocked thread */
#if NUM_CORES > 1
- UNLOCK_THREAD(current, current_state);
- UNLOCK_THREAD(thread, thread_state);
- set_irq_level(oldlevel);
-#endif
-}
+                /* Owner can change but thread cannot be dislodged - thread
+                 * may not be the first in the queue, which allows other
+                 * threads ahead in the list to be given ownership during the
+                 * operation. If thread is next then the waker will have to
+                 * wait for us and the owner of the object will remain fixed.
+                 * If we successfully grab the owner -- which at some point
+                 * is guaranteed -- then the queue remains fixed until we
+                 * pass by. */
+ for (;;)
+ {
+ LOCK_THREAD(bl_t);
-#ifdef HAVE_PRIORITY_SCHEDULING
-/*---------------------------------------------------------------------------
- * Sets the thread's relative priority for the core it runs on.
- *---------------------------------------------------------------------------
- */
-int thread_set_priority(struct thread_entry *thread, int priority)
-{
- unsigned old_priority = (unsigned)-1;
-
- if (thread == NULL)
- thread = cores[CURRENT_CORE].running;
+ /* Double-check the owner - retry if it changed */
+ if (bl->thread == bl_t)
+ break;
-#if NUM_CORES > 1
- /* Thread could be on any list and therefore on an interrupt accessible
- one - disable interrupts */
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ UNLOCK_THREAD(bl_t);
+ bl_t = bl->thread;
+ }
#endif
- unsigned state = GET_THREAD_STATE(thread);
+ bl_pr = bl->priority;
- /* Make sure it's not killed */
- if (state != STATE_KILLED)
- {
- old_priority = thread->priority;
- thread->priority = priority;
- cores[IF_COP_CORE(thread->core)].highest_priority = LOWEST_PRIORITY;
+ if (highest > bl_pr)
+ break; /* Object priority won't change */
+
+ /* This will include the thread being set */
+ queue_pr = find_highest_priority_in_list_l(*thread->bqp);
+
+ if (queue_pr == bl_pr)
+ break; /* Object priority not changing */
+
+ /* Update thread boost for this object */
+ bl->priority = queue_pr;
+ prio_move_entry(&bl_t->pdist, bl_pr, queue_pr);
+ bl_pr = find_first_set_bit(bl_t->pdist.mask);
+
+ if (bl_t->priority == bl_pr)
+ break; /* Blocking thread priority not changing */
+
+ if (bl_t->state == STATE_RUNNING)
+ {
+ /* Thread not blocked - we're done */
+ set_running_thread_priority(bl_t, bl_pr);
+ break;
+ }
+
+ bl_t->priority = bl_pr;
+ bl = bl_t->blocker; /* Blocking thread has a blocker? */
+
+ if (bl == NULL)
+ break; /* End of chain */
+
+ next = bl->thread;
+
+ if (next == tstart)
+ break; /* Full-circle */
+
+ UNLOCK_THREAD(thread);
+
+ thread = bl_t;
+ bl_t = next;
+ } /* for (;;) */
+
+ UNLOCK_THREAD(bl_t);
+ }
+ }
}
-#if NUM_CORES > 1
- UNLOCK_THREAD(thread, state);
+ UNLOCK_THREAD(thread);
+
set_irq_level(oldlevel);
-#endif
- return old_priority;
+
+ return old_base_priority;
}
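The return value is the previous base priority (or -1 on a bad argument), so a temporary bump can be undone - a sketch:

/* Sketch: temporarily raise a thread's base priority, then restore it.
 * The effective priority may stay higher while inheritance applies. */
void example_priority_bump(struct thread_entry *t)
{
    int old = thread_set_priority(t, HIGHEST_PRIORITY);

    /* ... urgent phase ... */

    if (old >= 0)
        thread_set_priority(t, old);
}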
/*---------------------------------------------------------------------------
- * Returns the current priority for a thread.
+ * Returns the current base priority for a thread.
*---------------------------------------------------------------------------
*/
int thread_get_priority(struct thread_entry *thread)
@@ -2423,64 +2803,26 @@ int thread_get_priority(struct thread_entry *thread)
if (thread == NULL)
thread = cores[CURRENT_CORE].running;
- return (unsigned)thread->priority;
+ return thread->base_priority;
}
+#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
- * Yield that guarantees thread execution once per round regardless of
- * thread's scheduler priority - basically a transient realtime boost
- * without altering the scheduler's thread precedence.
- *
- * HACK ALERT! Search for "priority inheritance" for proper treatment.
+ * Starts a frozen thread - similar semantics to wakeup_thread except that
+ * the thread is on no scheduler or wakeup queue at all. It exists simply by
+ * virtue of the slot having a state of STATE_FROZEN.
*---------------------------------------------------------------------------
*/
-void priority_yield(void)
-{
- const unsigned int core = CURRENT_CORE;
- struct thread_entry *thread = cores[core].running;
- thread->priority_x = HIGHEST_PRIORITY;
- switch_thread(NULL);
- thread->priority_x = LOWEST_PRIORITY;
-}
-#endif /* HAVE_PRIORITY_SCHEDULING */
-
-/* Resumes a frozen thread - similar logic to wakeup_thread except that
- the thread is on no scheduler list at all. It exists simply by virtue of
- the slot having a state of STATE_FROZEN. */
void thread_thaw(struct thread_entry *thread)
{
-#if NUM_CORES > 1
- /* Thread could be on any list and therefore on an interrupt accessible
- one - disable interrupts */
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-#endif
- unsigned state = GET_THREAD_STATE(thread);
+ LOCK_THREAD(thread);
- if (state == STATE_FROZEN)
- {
- const unsigned int core = CURRENT_CORE;
-#if NUM_CORES > 1
- if (thread->core != core)
- {
- core_schedule_wakeup(thread);
- }
- else
-#endif
- {
- add_to_list_l(&cores[core].running, thread);
- }
-
- UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING);
-#if NUM_CORES > 1
- set_irq_level(oldlevel);
-#endif
- return;
- }
+ if (thread->state == STATE_FROZEN)
+ core_schedule_wakeup(thread);
-#if NUM_CORES > 1
- UNLOCK_THREAD(thread, state);
+ UNLOCK_THREAD(thread);
set_irq_level(oldlevel);
-#endif
}
/*---------------------------------------------------------------------------
@@ -2501,21 +2843,31 @@ unsigned int switch_core(unsigned int new_core)
{
const unsigned int core = CURRENT_CORE;
struct thread_entry *current = cores[core].running;
- struct thread_entry *w;
- int oldlevel;
-
- /* Interrupts can access the lists that will be used - disable them */
- unsigned state = GET_THREAD_STATE(current);
if (core == new_core)
{
- /* No change - just unlock everything and return same core */
- UNLOCK_THREAD(current, state);
+ /* No change - just return same core */
return core;
}
+ int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ LOCK_THREAD(current);
+
+ if (current->name == THREAD_DESTRUCT)
+ {
+ /* Thread being killed - deactivate and let process complete */
+ UNLOCK_THREAD(current);
+ set_irq_level(oldlevel);
+ thread_wait(current);
+ /* Should never be reached */
+ THREAD_PANICF("switch_core->D:*R", current);
+ }
+
/* Get us off the running list for the current core */
+ RTR_LOCK(core);
remove_from_list_l(&cores[core].running, current);
+ rtr_subtract_entry(core, current->priority);
+ RTR_UNLOCK(core);
/* Stash return value (old core) in a safe place */
current->retval = core;
@@ -2532,39 +2884,31 @@ unsigned int switch_core(unsigned int new_core)
/* Do not use core_schedule_wakeup here since this will result in
* the thread starting to run on the other core before being finished on
- * this one. Delay the wakeup list unlock to keep the other core stuck
+ * this one. Delay the list unlock to keep the other core stuck
* until this thread is ready. */
- oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- w = LOCK_LIST(&cores[new_core].waking);
- ADD_TO_LIST_L_SELECT(w, &cores[new_core].waking, current);
+ RTR_LOCK(new_core);
+
+ rtr_add_entry(new_core, current->priority);
+ add_to_list_l(&cores[new_core].running, current);
/* Make a callback into device-specific code, unlock the wakeup list so
* that execution may resume on the new core, unlock our slot and finally
* restore the interrupt level */
- cores[core].blk_ops.flags = TBOP_SWITCH_CORE | TBOP_UNLOCK_CURRENT |
- TBOP_UNLOCK_LIST;
- cores[core].blk_ops.list_p = &cores[new_core].waking;
-#if CONFIG_CORELOCK == CORELOCK_SWAP
- cores[core].blk_ops.state = STATE_RUNNING;
- cores[core].blk_ops.list_v = w;
-#endif
+ cores[core].blk_ops.flags = TBOP_SWITCH_CORE;
+ cores[core].blk_ops.cl_p = &cores[new_core].rtr_cl;
+ cores[core].block_task = current;
+
+ UNLOCK_THREAD(current);
+
+ /* Alert other core to activity */
+ core_wake(new_core);
-#ifdef HAVE_PRIORITY_SCHEDULING
- current->priority_x = HIGHEST_PRIORITY;
- cores[core].highest_priority = LOWEST_PRIORITY;
-#endif
    /* Do the stack switching, cache maintenance and switch_thread call -
       requires native code */
switch_thread_core(core, current);
-#ifdef HAVE_PRIORITY_SCHEDULING
- current->priority_x = LOWEST_PRIORITY;
- cores[current->core].highest_priority = LOWEST_PRIORITY;
-#endif
-
/* Finally return the old core to caller */
return current->retval;
- (void)state;
}
#endif /* NUM_CORES > 1 */
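A sketch of the caller's view on dual-core targets, assuming the usual CPU/COP core ids:

/* Sketch: hop to the coprocessor, do core-local work, hop back.
 * switch_core() returns the core the thread came from. */
void example_run_on_cop(void)
{
    unsigned int old_core = switch_core(COP);

    /* ... work that must execute on the COP ... */

    switch_core(old_core);
}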
@@ -2578,12 +2922,11 @@ void init_threads(void)
{
const unsigned int core = CURRENT_CORE;
struct thread_entry *thread;
- int slot;
/* CPU will initialize first and then sleep */
- slot = find_empty_thread_slot();
+ thread = find_empty_thread_slot();
- if (slot >= MAXTHREADS)
+ if (thread == NULL)
{
/* WTF? There really must be a slot available at this stage.
* This can fail if, for example, .bss isn't zero'ed out by the loader
@@ -2592,33 +2935,29 @@ void init_threads(void)
}
/* Initialize initially non-zero members of core */
- thread_queue_init(&cores[core].waking);
cores[core].next_tmo_check = current_tick; /* Something not in the past */
-#ifdef HAVE_PRIORITY_SCHEDULING
- cores[core].highest_priority = LOWEST_PRIORITY;
-#endif
/* Initialize initially non-zero members of slot */
- thread = &threads[slot];
+ UNLOCK_THREAD(thread); /* No sync worries yet */
thread->name = main_thread_name;
- UNLOCK_THREAD_SET_STATE(thread, STATE_RUNNING); /* No sync worries yet */
-#if NUM_CORES > 1
- thread->core = core;
-#endif
+ thread->state = STATE_RUNNING;
+ IF_COP( thread->core = core; )
#ifdef HAVE_PRIORITY_SCHEDULING
+ corelock_init(&cores[core].rtr_cl);
+ thread->base_priority = PRIORITY_USER_INTERFACE;
+ prio_add_entry(&thread->pdist, PRIORITY_USER_INTERFACE);
thread->priority = PRIORITY_USER_INTERFACE;
- thread->priority_x = LOWEST_PRIORITY;
-#endif
-#if CONFIG_CORELOCK == SW_CORELOCK
- corelock_init(&thread->cl);
+ rtr_add_entry(core, PRIORITY_USER_INTERFACE);
#endif
+ corelock_init(&thread->waiter_cl);
+ corelock_init(&thread->slot_cl);
add_to_list_l(&cores[core].running, thread);
if (core == CPU)
{
thread->stack = stackbegin;
- thread->stack_size = (int)stackend - (int)stackbegin;
+ thread->stack_size = (uintptr_t)stackend - (uintptr_t)stackbegin;
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
/* TODO: HAL interface for this */
/* Wake up coprocessor and let it initialize kernel and threads */
@@ -2638,22 +2977,21 @@ void init_threads(void)
/* Get COP safely primed inside switch_thread where it will remain
* until a thread actually exists on it */
CPU_CTL = PROC_WAKE;
- remove_thread(NULL);
+ thread_exit();
#endif /* NUM_CORES */
}
}
-/*---------------------------------------------------------------------------
- * Returns the maximum percentage of stack a thread ever used while running.
- * NOTE: Some large buffer allocations that don't use enough the buffer to
- * overwrite stackptr[0] will not be seen.
- *---------------------------------------------------------------------------
- */
-int thread_stack_usage(const struct thread_entry *thread)
+/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
+#if NUM_CORES == 1
+static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
+#else
+static int stack_usage(uintptr_t *stackptr, size_t stack_size)
+#endif
{
- unsigned int *stackptr = thread->stack;
- int stack_words = thread->stack_size / sizeof (int);
- int i, usage = 0;
+ unsigned int stack_words = stack_size / sizeof (uintptr_t);
+ unsigned int i;
+ int usage = 0;
for (i = 0; i < stack_words; i++)
{
@@ -2667,6 +3005,17 @@ int thread_stack_usage(const struct thread_entry *thread)
return usage;
}
+/*---------------------------------------------------------------------------
+ * Returns the maximum percentage of stack a thread ever used while running.
+ * NOTE: Some large buffer allocations that don't use enough of the buffer
+ * to overwrite stackptr[0] will not be seen.
+ *---------------------------------------------------------------------------
+ */
+int thread_stack_usage(const struct thread_entry *thread)
+{
+ return stack_usage(thread->stack, thread->stack_size);
+}
+
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the core's idle stack ever used during
@@ -2675,19 +3024,7 @@ int thread_stack_usage(const struct thread_entry *thread)
*/
int idle_stack_usage(unsigned int core)
{
- unsigned int *stackptr = idle_stacks[core];
- int i, usage = 0;
-
- for (i = 0; i < IDLE_STACK_WORDS; i++)
- {
- if (stackptr[i] != DEADBEEF)
- {
- usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
- break;
- }
- }
-
- return usage;
+ return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif
diff --git a/uisimulator/sdl/SOURCES b/uisimulator/sdl/SOURCES
index 7971c57..1d5b498 100644
--- a/uisimulator/sdl/SOURCES
+++ b/uisimulator/sdl/SOURCES
@@ -1,5 +1,5 @@
button.c
-kernel.c
+kernel-sdl.c
#ifdef HAVE_LCD_BITMAP
lcd-bitmap.c
#elif defined(HAVE_LCD_CHARCELLS)
diff --git a/uisimulator/sdl/kernel-sdl.c b/uisimulator/sdl/kernel-sdl.c
new file mode 100644
index 0000000..b6e6a34
--- /dev/null
+++ b/uisimulator/sdl/kernel-sdl.c
@@ -0,0 +1,168 @@
+/***************************************************************************
+ * __________ __ ___.
+ * Open \______ \ ____ ____ | | _\_ |__ _______ ___
+ * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
+ * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
+ * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
+ * \/ \/ \/ \/ \/
+ * $Id$
+ *
+ * Copyright (C) 2002 by Felix Arends
+ *
+ * All files in this archive are subject to the GNU General Public License.
+ * See the file COPYING in the source tree root for full license agreement.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+
+#include <stdlib.h>
+#include <SDL.h>
+#include <SDL_thread.h>
+#include "memory.h"
+#include "system-sdl.h"
+#include "uisdl.h"
+#include "kernel.h"
+#include "thread-sdl.h"
+#include "thread.h"
+#include "debug.h"
+
+static SDL_TimerID tick_timer_id;
+long start_tick;
+
+/* Condition to signal that "interrupts" may proceed */
+static SDL_cond *sim_thread_cond;
+/* Mutex to serialize changing levels and exclude other threads while
+ * inside a handler */
+static SDL_mutex *sim_irq_mtx;
+static int interrupt_level = HIGHEST_IRQ_LEVEL;
+static int handlers_pending = 0;
+static int status_reg = 0;
+
+extern void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
+
+/* Necessary logic:
+ * 1) All threads must pass unblocked
+ * 2) Current handler must always pass unblocked
+ * 3) Threads must be excluded when irq routine is running
+ * 4) No more than one handler routine should execute at a time
+ */
+int set_irq_level(int level)
+{
+ SDL_LockMutex(sim_irq_mtx);
+
+ int oldlevel = interrupt_level;
+
+ if (status_reg == 0 && level == 0 && oldlevel != 0)
+ {
+ /* Not in a handler and "interrupts" are being reenabled */
+ if (handlers_pending > 0)
+ SDL_CondSignal(sim_thread_cond);
+ }
+
+ interrupt_level = level; /* save new level */
+
+ SDL_UnlockMutex(sim_irq_mtx);
+ return oldlevel;
+}
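The sim thus keeps the target's save/restore idiom intact - a sketch of the usual caller pattern:

/* Sketch: a critical section in sim code, identical in shape to target */
void example_critical_section(void)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL); /* Mask "interrupts" */

    /* ... touch state shared with tick tasks ... */

    set_irq_level(oldlevel); /* Restore the previous level */
}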
+
+void sim_enter_irq_handler(void)
+{
+ SDL_LockMutex(sim_irq_mtx);
+ handlers_pending++;
+
+ if(interrupt_level != 0)
+ {
+ /* "Interrupts" are disabled. Wait for reenable */
+ SDL_CondWait(sim_thread_cond, sim_irq_mtx);
+ }
+
+ status_reg = 1;
+}
+
+void sim_exit_irq_handler(void)
+{
+ if (--handlers_pending > 0)
+ SDL_CondSignal(sim_thread_cond);
+
+ status_reg = 0;
+ SDL_UnlockMutex(sim_irq_mtx);
+}
+
+bool sim_kernel_init(void)
+{
+ sim_irq_mtx = SDL_CreateMutex();
+ if (sim_irq_mtx == NULL)
+ {
+ fprintf(stderr, "Cannot create sim_handler_mtx\n");
+ return false;
+ }
+
+ sim_thread_cond = SDL_CreateCond();
+ if (sim_thread_cond == NULL)
+ {
+ fprintf(stderr, "Cannot create sim_thread_cond\n");
+ return false;
+ }
+
+ return true;
+}
+
+void sim_kernel_shutdown(void)
+{
+ SDL_RemoveTimer(tick_timer_id);
+ SDL_DestroyMutex(sim_irq_mtx);
+ SDL_DestroyCond(sim_thread_cond);
+}
+
+Uint32 tick_timer(Uint32 interval, void *param)
+{
+ long new_tick;
+
+ (void) interval;
+ (void) param;
+
+ new_tick = (SDL_GetTicks() - start_tick) / (1000/HZ);
+
+ if(new_tick != current_tick)
+ {
+ long t;
+ for(t = new_tick - current_tick; t > 0; t--)
+ {
+ int i;
+
+ sim_enter_irq_handler();
+
+ /* Run through the list of tick tasks */
+ for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
+ {
+ if(tick_funcs[i])
+ {
+ tick_funcs[i]();
+ }
+ }
+
+ sim_exit_irq_handler();
+ }
+
+ current_tick = new_tick;
+ }
+
+ return 1;
+}
+
+void tick_start(unsigned int interval_in_ms)
+{
+ if (tick_timer_id != NULL)
+ {
+ SDL_RemoveTimer(tick_timer_id);
+ tick_timer_id = NULL;
+ }
+ else
+ {
+ start_tick = SDL_GetTicks();
+ }
+
+ tick_timer_id = SDL_AddTimer(interval_in_ms, tick_timer, NULL);
+}
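With tick tasks now owned by the shared firmware kernel (tick_funcs is merely extern here), a sim tick task is registered the same way as on target - a sketch, assuming tick_add_task() from firmware/kernel.c is linked into sim builds:

/* Sketch: a task run once per tick from tick_timer() above */
static void example_tick_task(void)
{
    /* Executes inside sim_enter/exit_irq_handler() - keep it brief */
}

void example_install_tick_task(void)
{
    tick_start(1000 / HZ);            /* Arm the SDL tick timer */
    tick_add_task(example_tick_task);
}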
diff --git a/uisimulator/sdl/kernel.c b/uisimulator/sdl/kernel.c
deleted file mode 100644
index d67fb2b..0000000
--- a/uisimulator/sdl/kernel.c
+++ /dev/null
@@ -1,739 +0,0 @@
-/***************************************************************************
- * __________ __ ___.
- * Open \______ \ ____ ____ | | _\_ |__ _______ ___
- * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
- * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
- * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
- * \/ \/ \/ \/ \/
- * $Id$
- *
- * Copyright (C) 2002 by Felix Arends
- *
- * All files in this archive are subject to the GNU General Public License.
- * See the file COPYING in the source tree root for full license agreement.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- ****************************************************************************/
-
-#include <stdlib.h>
-#include <SDL.h>
-#include <SDL_thread.h>
-#include "memory.h"
-#include "system-sdl.h"
-#include "uisdl.h"
-#include "kernel.h"
-#include "thread-sdl.h"
-#include "thread.h"
-#include "debug.h"
-
-/* Condition to signal that "interrupts" may proceed */
-static SDL_cond *sim_thread_cond;
-/* Mutex to serialize changing levels and exclude other threads while
- * inside a handler */
-static SDL_mutex *sim_irq_mtx;
-static int interrupt_level = HIGHEST_IRQ_LEVEL;
-static int handlers_pending = 0;
-static int status_reg = 0;
-
-extern struct core_entry cores[NUM_CORES];
-
-/* Nescessary logic:
- * 1) All threads must pass unblocked
- * 2) Current handler must always pass unblocked
- * 3) Threads must be excluded when irq routine is running
- * 4) No more than one handler routine should execute at a time
- */
-int set_irq_level(int level)
-{
- SDL_LockMutex(sim_irq_mtx);
-
- int oldlevel = interrupt_level;
-
- if (status_reg == 0 && level == 0 && oldlevel != 0)
- {
- /* Not in a handler and "interrupts" are being reenabled */
- if (handlers_pending > 0)
- SDL_CondSignal(sim_thread_cond);
- }
-
- interrupt_level = level; /* save new level */
-
- SDL_UnlockMutex(sim_irq_mtx);
- return oldlevel;
-}
-
-void sim_enter_irq_handler(void)
-{
- SDL_LockMutex(sim_irq_mtx);
- handlers_pending++;
-
- if(interrupt_level != 0)
- {
- /* "Interrupts" are disabled. Wait for reenable */
- SDL_CondWait(sim_thread_cond, sim_irq_mtx);
- }
-
- status_reg = 1;
-}
-
-void sim_exit_irq_handler(void)
-{
- if (--handlers_pending > 0)
- SDL_CondSignal(sim_thread_cond);
-
- status_reg = 0;
- SDL_UnlockMutex(sim_irq_mtx);
-}
-
-bool sim_kernel_init(void)
-{
- sim_irq_mtx = SDL_CreateMutex();
- if (sim_irq_mtx == NULL)
- {
- fprintf(stderr, "Cannot create sim_handler_mtx\n");
- return false;
- }
-
- sim_thread_cond = SDL_CreateCond();
- if (sim_thread_cond == NULL)
- {
- fprintf(stderr, "Cannot create sim_thread_cond\n");
- return false;
- }
-
- return true;
-}
-
-void sim_kernel_shutdown(void)
-{
- SDL_DestroyMutex(sim_irq_mtx);
- SDL_DestroyCond(sim_thread_cond);
-}
-
-volatile long current_tick = 0;
-static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);
-
-/* This array holds all queues that are initiated. It is used for broadcast. */
-static struct event_queue *all_queues[MAX_NUM_QUEUES];
-static int num_queues = 0;
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-/* Moves waiting thread's descriptor to the current sender when a
- message is dequeued */
-static void queue_fetch_sender(struct queue_sender_list *send,
- unsigned int i)
-{
- struct thread_entry **spp = &send->senders[i];
-
- if(*spp)
- {
- send->curr_sender = *spp;
- *spp = NULL;
- }
-}
-
-/* Puts the specified return value in the waiting thread's return value
- and wakes the thread - a sender should be confirmed to exist first */
-static void queue_release_sender(struct thread_entry **sender,
- intptr_t retval)
-{
- (*sender)->retval = retval;
- wakeup_thread_no_listlock(sender);
- if(*sender != NULL)
- {
- fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
- exit(-1);
- }
-}
-
-/* Releases any waiting threads that are queued with queue_send -
- reply with NULL */
-static void queue_release_all_senders(struct event_queue *q)
-{
- if(q->send)
- {
- unsigned int i;
- for(i = q->read; i != q->write; i++)
- {
- struct thread_entry **spp =
- &q->send->senders[i & QUEUE_LENGTH_MASK];
- if(*spp)
- {
- queue_release_sender(spp, 0);
- }
- }
- }
-}
-
-/* Enables queue_send on the specified queue - caller allocates the extra
- data structure */
-void queue_enable_queue_send(struct event_queue *q,
- struct queue_sender_list *send)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- q->send = NULL;
- if(send)
- {
- q->send = send;
- memset(send, 0, sizeof(*send));
- }
- set_irq_level(oldlevel);
-}
-#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
-
-void queue_init(struct event_queue *q, bool register_queue)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- q->read = 0;
- q->write = 0;
- thread_queue_init(&q->queue);
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- q->send = NULL; /* No message sending by default */
-#endif
-
- if(register_queue)
- {
- if(num_queues >= MAX_NUM_QUEUES)
- {
- fprintf(stderr, "queue_init->out of queues");
- exit(-1);
- }
- /* Add it to the all_queues array */
- all_queues[num_queues++] = q;
- }
-
- set_irq_level(oldlevel);
-}
-
-void queue_delete(struct event_queue *q)
-{
- int i;
- bool found = false;
-
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- /* Find the queue to be deleted */
- for(i = 0;i < num_queues;i++)
- {
- if(all_queues[i] == q)
- {
- found = true;
- break;
- }
- }
-
- if(found)
- {
- /* Move the following queues up in the list */
- for(;i < num_queues-1;i++)
- {
- all_queues[i] = all_queues[i+1];
- }
-
- num_queues--;
- }
-
- /* Release threads waiting on queue head */
- thread_queue_wake(&q->queue);
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- /* Release waiting threads and reply to any dequeued message
- waiting for one. */
- queue_release_all_senders(q);
- queue_reply(q, 0);
-#endif
-
- q->read = 0;
- q->write = 0;
-
- set_irq_level(oldlevel);
-}
-
-void queue_wait(struct event_queue *q, struct queue_event *ev)
-{
- unsigned int rd;
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if (q->send && q->send->curr_sender)
- {
- /* auto-reply */
- queue_release_sender(&q->send->curr_sender, 0);
- }
-#endif
-
- if (q->read == q->write)
- {
- do
- {
- block_thread(&q->queue);
- oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- }
- while (q->read == q->write);
- }
-
- rd = q->read++ & QUEUE_LENGTH_MASK;
- *ev = q->events[rd];
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send && q->send->senders[rd])
- {
- /* Get data for a waiting thread if one */
- queue_fetch_sender(q->send, rd);
- }
-#endif
-
- set_irq_level(oldlevel);
-}
-
-void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if (q->send && q->send->curr_sender)
- {
- /* auto-reply */
- queue_release_sender(&q->send->curr_sender, 0);
- }
-#endif
-
- if (q->read == q->write && ticks > 0)
- {
- block_thread_w_tmo(&q->queue, ticks);
- oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- }
-
- if(q->read != q->write)
- {
- unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
- *ev = q->events[rd];
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send && q->send->senders[rd])
- {
- /* Get data for a waiting thread if one */
- queue_fetch_sender(q->send, rd);
- }
-#endif
- }
- else
- {
- ev->id = SYS_TIMEOUT;
- }
-
- set_irq_level(oldlevel);
-}
-
-void queue_post(struct event_queue *q, long id, intptr_t data)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
-
- q->events[wr].id = id;
- q->events[wr].data = data;
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send)
- {
- struct thread_entry **spp = &q->send->senders[wr];
-
- if(*spp)
- {
- /* overflow protect - unblock any thread waiting at this index */
- queue_release_sender(spp, 0);
- }
- }
-#endif
-
- wakeup_thread(&q->queue);
-
- set_irq_level(oldlevel);
-}
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
-intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
-{
- int oldlevel = set_irq_level(oldlevel);
-
- unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;
-
- q->events[wr].id = id;
- q->events[wr].data = data;
-
- if(q->send)
- {
- struct thread_entry **spp = &q->send->senders[wr];
-
- if(*spp)
- {
- /* overflow protect - unblock any thread waiting at this index */
- queue_release_sender(spp, 0);
- }
-
- wakeup_thread(&q->queue);
-
- block_thread_no_listlock(spp);
- return thread_get_current()->retval;
- }
-
- /* Function as queue_post if sending is not enabled */
- wakeup_thread(&q->queue);
- set_irq_level(oldlevel);
- return 0;
-}
-
-#if 0 /* not used now but probably will be later */
-/* Query if the last message dequeued was added by queue_send or not */
-bool queue_in_queue_send(struct event_queue *q)
-{
- return q->send && q->send->curr_sender;
-}
-#endif
-
-/* Replies with retval to any dequeued message sent with queue_send */
-void queue_reply(struct event_queue *q, intptr_t retval)
-{
- if(q->send && q->send->curr_sender)
- {
- queue_release_sender(&q->send->curr_sender, retval);
- }
-}
-#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
-
-bool queue_empty(const struct event_queue* q)
-{
- return ( q->read == q->write );
-}
-
-bool queue_peek(struct event_queue *q, struct queue_event *ev)
-{
- if (q->read == q->write)
- return false;
-
- bool have_msg = false;
-
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- if (q->read != q->write)
- {
- *ev = q->events[q->read & QUEUE_LENGTH_MASK];
- have_msg = true;
- }
-
- set_irq_level(oldlevel);
-
- return have_msg;
-}
-
-void queue_clear(struct event_queue* q)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- /* fixme: This is potentially unsafe in case we do interrupt-like processing */
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- /* Release all thread waiting in the queue for a reply -
- dequeued sent message will be handled by owning thread */
- queue_release_all_senders(q);
-#endif
- q->read = 0;
- q->write = 0;
-
- set_irq_level(oldlevel);
-}
-
-void queue_remove_from_head(struct event_queue *q, long id)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
-
- while(q->read != q->write)
- {
- unsigned int rd = q->read & QUEUE_LENGTH_MASK;
-
- if(q->events[rd].id != id)
- {
- break;
- }
-
-#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
- if(q->send)
- {
- struct thread_entry **spp = &q->send->senders[rd];
-
- if(*spp)
- {
- /* Release any thread waiting on this message */
- queue_release_sender(spp, 0);
- }
- }
-#endif
- q->read++;
- }
-
- set_irq_level(oldlevel);
-}
-
-int queue_count(const struct event_queue *q)
-{
- return q->write - q->read;
-}
-
-int queue_broadcast(long id, intptr_t data)
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- int i;
-
- for(i = 0;i < num_queues;i++)
- {
- queue_post(all_queues[i], id, data);
- }
-
- set_irq_level(oldlevel);
- return num_queues;
-}
-
-void yield(void)
-{
- switch_thread(NULL);
-}
-
-void sleep(int ticks)
-{
- sleep_thread(ticks);
-}
-
-void sim_tick_tasks(void)
-{
- int i;
-
- /* Run through the list of tick tasks */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i])
- {
- tick_funcs[i]();
- }
- }
-}
-
-int tick_add_task(void (*f)(void))
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- int i;
-
- /* Add a task if there is room */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i] == NULL)
- {
- tick_funcs[i] = f;
- set_irq_level(oldlevel);
- return 0;
- }
- }
- fprintf(stderr, "Error! tick_add_task(): out of tasks");
- exit(-1);
- return -1;
-}
-
-int tick_remove_task(void (*f)(void))
-{
- int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
- int i;
-
- /* Remove a task if it is there */
- for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
- {
- if(tick_funcs[i] == f)
- {
- tick_funcs[i] = NULL;
- set_irq_level(oldlevel);
- return 0;
- }
- }
-
- set_irq_level(oldlevel);
- return -1;
-}
-
-/* Very simple mutex simulation - won't work with pre-emptive
- multitasking, but is better than nothing at all */
-void mutex_init(struct mutex *m)
-{
- m->queue = NULL;
- m->thread = NULL;
- m->count = 0;
- m->locked = 0;
-}
-
-void mutex_lock(struct mutex *m)
-{
- struct thread_entry *const thread = thread_get_current();
-
- if(thread == m->thread)
- {
- m->count++;
- return;
- }
-
- if (!test_and_set(&m->locked, 1))
- {
- m->thread = thread;
- return;
- }
-
- block_thread_no_listlock(&m->queue);
-}
-
-void mutex_unlock(struct mutex *m)
-{
- /* unlocker not being the owner is an unlocking violation */
- if(m->thread != thread_get_current())
- {
- fprintf(stderr, "mutex_unlock->wrong thread");
- exit(-1);
- }
-
- if (m->count > 0)
- {
- /* this thread still owns lock */
- m->count--;
- return;
- }
-
- m->thread = wakeup_thread_no_listlock(&m->queue);
-
- if (m->thread == NULL)
- {
- /* release lock */
- m->locked = 0;
- }
-}
-
-#ifdef HAVE_SEMAPHORE_OBJECTS
-void semaphore_init(struct semaphore *s, int max, int start)
-{
- if(max <= 0 || start < 0 || start > max)
- {
- fprintf(stderr, "semaphore_init->inv arg");
- exit(-1);
- }
- s->queue = NULL;
- s->max = max;
- s->count = start;
-}
-
-void semaphore_wait(struct semaphore *s)
-{
- if(--s->count >= 0)
- return;
- block_thread_no_listlock(&s->queue);
-}
-
-void semaphore_release(struct semaphore *s)
-{
- if(s->count < s->max)
- {
- if(++s->count <= 0)
- {
- if(s->queue == NULL)
- {
- /* there should be threads in this queue */
- fprintf(stderr, "semaphore->wakeup");
- exit(-1);
- }
- /* a thread was queued - wake it up */
- wakeup_thread_no_listlock(&s->queue);
- }
- }
-}
-#endif /* HAVE_SEMAPHORE_OBJECTS */
-
-#ifdef HAVE_EVENT_OBJECTS
-void event_init(struct event *e, unsigned int flags)
-{
- e->queues[STATE_NONSIGNALED] = NULL;
- e->queues[STATE_SIGNALED] = NULL;
- e->state = flags & STATE_SIGNALED;
- e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
-}
-
-void event_wait(struct event *e, unsigned int for_state)
-{
- unsigned int last_state = e->state;
-
- if(e->automatic != 0)
- {
- /* wait for false always satisfied by definition
- or if it just changed to false */
- if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
- {
- /* automatic - unsignal */
- e->state = STATE_NONSIGNALED;
- return;
- }
- /* block until state matches */
- }
- else if(for_state == last_state)
- {
- /* the state being waited for is the current state */
- return;
- }
-
- /* current state does not match wait-for state */
- block_thread_no_listlock(&e->queues[for_state]);
-}
-
-void event_set_state(struct event *e, unsigned int state)
-{
- unsigned int last_state = e->state;
-
- if(last_state == state)
- {
- /* no change */
- return;
- }
-
- if(state == STATE_SIGNALED)
- {
- if(e->automatic != 0)
- {
- struct thread_entry *thread;
-
- if(e->queues[STATE_NONSIGNALED] != NULL)
- {
- /* no thread should have ever blocked for nonsignaled */
- fprintf(stderr, "set_event_state->queue[NS]:S");
- exit(-1);
- }
-
- /* pass to next thread and keep unsignaled - "pulse" */
- thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
- e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
- }
- else
- {
- /* release all threads waiting for signaled */
- thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
- e->state = STATE_SIGNALED;
- }
- }
- else
- {
- /* release all threads waiting for unsignaled */
- if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
- {
- /* no thread should have ever blocked */
- fprintf(stderr, "set_event_state->queue[NS]:NS");
- exit(-1);
- }
-
- thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
- e->state = STATE_NONSIGNALED;
- }
-}
-#endif /* HAVE_EVENT_OBJECTS */
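
The deleted event code distinguishes manual events, where a signal releases every waiter and the state sticks, from automatic ones, where each signal is a pulse that wakes at most one waiter and reverts to nonsignaled. A hedged usage sketch against the same API; the producer/consumer threads are hypothetical:

    static struct event buf_done;   /* event_init(&buf_done, EVENT_AUTOMATIC)
                                       at setup time: starts nonsignaled    */

    static void consumer(void)
    {
        for (;;)
        {
            /* Automatic event: exactly one waiter wakes per signal and
               the state snaps back to nonsignaled (a "pulse"). */
            event_wait(&buf_done, STATE_SIGNALED);
            /* ... consume one buffer ... */
        }
    }

    static void producer(void)
    {
        /* ... fill a buffer ... */
        event_set_state(&buf_done, STATE_SIGNALED);
    }
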
diff --git a/uisimulator/sdl/system-sdl.h b/uisimulator/sdl/system-sdl.h
index 2197a01..c5e7d40 100644
--- a/uisimulator/sdl/system-sdl.h
+++ b/uisimulator/sdl/system-sdl.h
@@ -29,4 +29,6 @@ void sim_exit_irq_handler(void);
bool sim_kernel_init(void);
void sim_kernel_shutdown(void);
+extern long start_tick;
+
#endif /* _SYSTEM_SDL_H_ */
diff --git a/uisimulator/sdl/thread-sdl.c b/uisimulator/sdl/thread-sdl.c
index d1a8e60..78a66f7 100644
--- a/uisimulator/sdl/thread-sdl.c
+++ b/uisimulator/sdl/thread-sdl.c
@@ -26,6 +26,7 @@
#include <setjmp.h>
#include "system-sdl.h"
#include "thread-sdl.h"
+#include "system.h"
#include "kernel.h"
#include "thread.h"
#include "debug.h"
@@ -37,7 +38,7 @@
#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__)
static char __name[32];
#define THREAD_SDL_GET_NAME(thread) \
- ({ thread_get_name(__name, sizeof(__name)/sizeof(__name[0]), thread); __name; })
+ ({ thread_get_name(__name, ARRAYLEN(__name), thread); __name; })
#else
#define THREAD_SDL_DEBUGF(...)
#define THREAD_SDL_GET_NAME(thread)
@@ -54,7 +55,6 @@ struct thread_entry threads[MAXTHREADS];
* way to get them back in there so they may exit */
static jmp_buf thread_jmpbufs[MAXTHREADS];
static SDL_mutex *m;
-static struct thread_entry *running;
static bool threads_exit = false;
extern long start_tick;
@@ -78,7 +78,7 @@ void thread_sdl_shutdown(void)
{
/* Signal thread on delay or block */
SDL_Thread *t = thread->context.t;
- SDL_CondSignal(thread->context.c);
+ SDL_SemPost(thread->context.s);
SDL_UnlockMutex(m);
/* Wait for it to finish */
SDL_WaitThread(t, NULL);
@@ -98,7 +98,7 @@ extern void app_main(void *param);
static int thread_sdl_app_main(void *param)
{
SDL_LockMutex(m);
- running = &threads[0];
+ cores[CURRENT_CORE].running = &threads[0];
/* Set the jump address for return */
if (setjmp(thread_jmpbufs[0]) == 0)
@@ -116,6 +116,8 @@ static int thread_sdl_app_main(void *param)
/* Initialize SDL threading */
bool thread_sdl_init(void *param)
{
+ struct thread_entry *thread;
+ memset(cores, 0, sizeof(cores));
memset(threads, 0, sizeof(threads));
m = SDL_CreateMutex();
@@ -129,28 +131,30 @@ bool thread_sdl_init(void *param)
/* Slot 0 is reserved for the main thread - initialize it here and
then create the SDL thread - it is possible to have a quick, early
shutdown try to access the structure. */
- running = &threads[0];
- running->stack = " ";
- running->stack_size = 8;
- running->name = "main";
- running->state = STATE_RUNNING;
- running->context.c = SDL_CreateCond();
+ thread = &threads[0];
+ thread->stack = (uintptr_t *)" ";
+ thread->stack_size = 8;
+ thread->name = "main";
+ thread->state = STATE_RUNNING;
+ thread->context.s = SDL_CreateSemaphore(0);
+ cores[CURRENT_CORE].running = thread;
- if (running->context.c == NULL)
+ if (thread->context.s == NULL)
{
- fprintf(stderr, "Failed to create main condition variable\n");
+ fprintf(stderr, "Failed to create main semaphore\n");
return false;
}
- running->context.t = SDL_CreateThread(thread_sdl_app_main, param);
+ thread->context.t = SDL_CreateThread(thread_sdl_app_main, param);
- if (running->context.t == NULL)
+ if (thread->context.t == NULL)
{
+ SDL_DestroySemaphore(thread->context.s);
fprintf(stderr, "Failed to create main thread\n");
return false;
}
- THREAD_SDL_DEBUGF("Main thread: %p\n", running);
+ THREAD_SDL_DEBUGF("Main thread: %p\n", thread);
SDL_UnlockMutex(m);
return true;
@@ -160,21 +164,22 @@ bool thread_sdl_init(void *param)
void thread_sdl_thread_lock(void *me)
{
SDL_LockMutex(m);
- running = (struct thread_entry *)me;
+ cores[CURRENT_CORE].running = (struct thread_entry *)me;
if (threads_exit)
- remove_thread(NULL);
+ thread_exit();
}
void * thread_sdl_thread_unlock(void)
{
- struct thread_entry *current = running;
+ struct thread_entry *current = cores[CURRENT_CORE].running;
SDL_UnlockMutex(m);
return current;
}
-static int find_empty_thread_slot(void)
+static struct thread_entry * find_empty_thread_slot(void)
{
+ struct thread_entry *thread = NULL;
int n;
for (n = 0; n < MAXTHREADS; n++)
@@ -182,10 +187,13 @@ static int find_empty_thread_slot(void)
int state = threads[n].state;
if (state == STATE_KILLED)
+ {
+ thread = &threads[n];
break;
+ }
}
- return n;
+ return thread;
}
static void add_to_list_l(struct thread_entry **list,
@@ -229,64 +237,163 @@ static void remove_from_list_l(struct thread_entry **list,
thread->l.next->l.prev = thread->l.prev;
}
-static inline void run_blocking_ops(void)
-{
- set_irq_level(0);
-}
-
struct thread_entry *thread_get_current(void)
{
- return running;
+ return cores[CURRENT_CORE].running;
}
-void switch_thread(struct thread_entry *old)
+void switch_thread(void)
{
- struct thread_entry *current = running;
+ struct thread_entry *current = cores[CURRENT_CORE].running;
- SDL_UnlockMutex(m);
- /* Any other thread waiting already will get it first */
- SDL_LockMutex(m);
- running = current;
+ set_irq_level(0);
- if (threads_exit)
- remove_thread(NULL);
+ switch (current->state)
+ {
+ case STATE_RUNNING:
+ {
+ SDL_UnlockMutex(m);
+ /* Any other thread waiting already will get it first */
+ SDL_LockMutex(m);
+ break;
+ } /* STATE_RUNNING: */
+
+ case STATE_BLOCKED:
+ {
+ int oldlevel;
+
+ SDL_UnlockMutex(m);
+ SDL_SemWait(current->context.s);
+ SDL_LockMutex(m);
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+ current->state = STATE_RUNNING;
+ set_irq_level(oldlevel);
+ break;
+ } /* STATE_BLOCKED: */
+
+ case STATE_BLOCKED_W_TMO:
+ {
+ int result, oldlevel;
+
+ SDL_UnlockMutex(m);
+ result = SDL_SemWaitTimeout(current->context.s, current->tmo_tick);
+ SDL_LockMutex(m);
+
+ oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
+
+ if (current->state == STATE_BLOCKED_W_TMO)
+ {
+ /* Timed out */
+ remove_from_list_l(current->bqp, current);
+
+#ifdef HAVE_WAKEUP_EXT_CB
+ if (current->wakeup_ext_cb != NULL)
+ current->wakeup_ext_cb(current);
+#endif
+ current->state = STATE_RUNNING;
+ }
- (void)old;
+ if (result == SDL_MUTEX_TIMEDOUT)
+ {
+ /* Other signals from an explicit wake could have been made before
+ * arriving here if we timed out waiting for the semaphore. Make
+ * sure the count is reset. */
+ while (SDL_SemValue(current->context.s) > 0)
+ SDL_SemTryWait(current->context.s);
+ }
+
+ set_irq_level(oldlevel);
+ break;
+ } /* STATE_BLOCKED_W_TMO: */
+
+ case STATE_SLEEPING:
+ {
+ SDL_UnlockMutex(m);
+ SDL_SemWaitTimeout(current->context.s, current->tmo_tick);
+ SDL_LockMutex(m);
+ current->state = STATE_RUNNING;
+ break;
+ } /* STATE_SLEEPING: */
+ }
+
+ cores[CURRENT_CORE].running = current;
+
+ if (threads_exit)
+ thread_exit();
}
void sleep_thread(int ticks)
{
- struct thread_entry *current;
+ struct thread_entry *current = cores[CURRENT_CORE].running;
int rem;
- current = running;
current->state = STATE_SLEEPING;
rem = (SDL_GetTicks() - start_tick) % (1000/HZ);
if (rem < 0)
rem = 0;
- rem = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
+ current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
+}
+
+void block_thread(struct thread_entry *current)
+{
+ current->state = STATE_BLOCKED;
+ add_to_list_l(current->bqp, current);
+}
+
+void block_thread_w_tmo(struct thread_entry *current, int ticks)
+{
+ current->state = STATE_BLOCKED_W_TMO;
+ current->tmo_tick = (1000/HZ)*ticks;
+ add_to_list_l(current->bqp, current);
+}
+
+unsigned int wakeup_thread(struct thread_entry **list)
+{
+ struct thread_entry *thread = *list;
- if (rem == 0)
+ if (thread != NULL)
{
- /* Unlock and give up rest of quantum */
- SDL_UnlockMutex(m);
- SDL_Delay(0);
- SDL_LockMutex(m);
+ switch (thread->state)
+ {
+ case STATE_BLOCKED:
+ case STATE_BLOCKED_W_TMO:
+ remove_from_list_l(list, thread);
+ thread->state = STATE_RUNNING;
+ SDL_SemPost(thread->context.s);
+ return THREAD_OK;
+ }
}
- else
+
+ return THREAD_NONE;
+}
+
+unsigned int thread_queue_wake(struct thread_entry **list)
+{
+ unsigned int result = THREAD_NONE;
+
+ for (;;)
{
- /* These sleeps must be signalable for thread exit */
- SDL_CondWaitTimeout(current->context.c, m, rem);
- }
+ unsigned int rc = wakeup_thread(list);
- running = current;
+ if (rc == THREAD_NONE)
+ break;
- current->state = STATE_RUNNING;
+ result |= rc;
+ }
- if (threads_exit)
- remove_thread(NULL);
+ return result;
+}
+
+void thread_thaw(struct thread_entry *thread)
+{
+ if (thread->state == STATE_FROZEN)
+ {
+ thread->state = STATE_RUNNING;
+ SDL_SemPost(thread->context.s);
+ }
}
int runthread(void *data)
@@ -297,9 +404,9 @@ int runthread(void *data)
/* Cannot access thread variables before locking the mutex as the
data structures may not be filled-in yet. */
SDL_LockMutex(m);
- running = (struct thread_entry *)data;
- current = running;
- current_jmpbuf = &thread_jmpbufs[running - threads];
+ cores[CURRENT_CORE].running = (struct thread_entry *)data;
+ current = cores[CURRENT_CORE].running;
+ current_jmpbuf = &thread_jmpbufs[current - threads];
/* Setup jump for exit */
if (setjmp(*current_jmpbuf) == 0)
@@ -307,9 +414,10 @@ int runthread(void *data)
/* Run the thread routine */
if (current->state == STATE_FROZEN)
{
- SDL_CondWait(current->context.c, m);
- running = current;
-
+ SDL_UnlockMutex(m);
+ SDL_SemWait(current->context.s);
+ SDL_LockMutex(m);
+ cores[CURRENT_CORE].running = current;
}
if (!threads_exit)
@@ -320,7 +428,7 @@ int runthread(void *data)
/* Thread routine returned - suicide */
}
- remove_thread(NULL);
+ thread_exit();
}
else
{
@@ -332,131 +440,59 @@ int runthread(void *data)
}
struct thread_entry*
- create_thread(void (*function)(void), void* stack, int stack_size,
+ create_thread(void (*function)(void), void* stack, size_t stack_size,
unsigned flags, const char *name)
{
- /** Avoid compiler warnings */
+ struct thread_entry *thread;
SDL_Thread* t;
- SDL_cond *cond;
- int slot;
+ SDL_sem *s;
THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : "");
- slot = find_empty_thread_slot();
- if (slot >= MAXTHREADS)
+ thread = find_empty_thread_slot();
+ if (thread == NULL)
{
DEBUGF("Failed to find thread slot\n");
return NULL;
}
- cond = SDL_CreateCond();
- if (cond == NULL)
+ s = SDL_CreateSemaphore(0);
+ if (s == NULL)
{
- DEBUGF("Failed to create condition variable\n");
+ DEBUGF("Failed to create semaphore\n");
return NULL;
}
- t = SDL_CreateThread(runthread, &threads[slot]);
+ t = SDL_CreateThread(runthread, thread);
if (t == NULL)
{
DEBUGF("Failed to create SDL thread\n");
- SDL_DestroyCond(cond);
+ SDL_DestroySemaphore(s);
return NULL;
}
- threads[slot].stack = stack;
- threads[slot].stack_size = stack_size;
- threads[slot].name = name;
- threads[slot].state = (flags & CREATE_THREAD_FROZEN) ?
+ thread->stack = stack;
+ thread->stack_size = stack_size;
+ thread->name = name;
+ thread->state = (flags & CREATE_THREAD_FROZEN) ?
STATE_FROZEN : STATE_RUNNING;
- threads[slot].context.start = function;
- threads[slot].context.t = t;
- threads[slot].context.c = cond;
+ thread->context.start = function;
+ thread->context.t = t;
+ thread->context.s = s;
THREAD_SDL_DEBUGF("New Thread: %d (%s)\n",
- slot, THREAD_SDL_GET_NAME(&threads[slot]));
+ thread - threads, THREAD_SDL_GET_NAME(thread));
- return &threads[slot];
-}
-
-void _block_thread(struct thread_queue *tq)
-{
- struct thread_entry *thread = running;
-
- thread->state = STATE_BLOCKED;
- thread->bqp = tq;
- add_to_list_l(&tq->queue, thread);
-
- run_blocking_ops();
-
- SDL_CondWait(thread->context.c, m);
- running = thread;
-
- if (threads_exit)
- remove_thread(NULL);
-}
-
-void block_thread_w_tmo(struct thread_queue *tq, int ticks)
-{
- struct thread_entry *thread = running;
-
- thread->state = STATE_BLOCKED_W_TMO;
- thread->bqp = tq;
- add_to_list_l(&tq->queue, thread);
-
- run_blocking_ops();
-
- SDL_CondWaitTimeout(thread->context.c, m, (1000/HZ) * ticks);
- running = thread;
-
- if (thread->state == STATE_BLOCKED_W_TMO)
- {
- /* Timed out */
- remove_from_list_l(&tq->queue, thread);
- thread->state = STATE_RUNNING;
- }
-
- if (threads_exit)
- remove_thread(NULL);
-}
-
-struct thread_entry * _wakeup_thread(struct thread_queue *tq)
-{
- struct thread_entry *thread = tq->queue;
-
- if (thread == NULL)
- {
- return NULL;
- }
-
- switch (thread->state)
- {
- case STATE_BLOCKED:
- case STATE_BLOCKED_W_TMO:
- remove_from_list_l(&tq->queue, thread);
- thread->state = STATE_RUNNING;
- SDL_CondSignal(thread->context.c);
- return thread;
- default:
- return NULL;
- }
-}
-
-void thread_thaw(struct thread_entry *thread)
-{
- if (thread->state == STATE_FROZEN)
- {
- thread->state = STATE_RUNNING;
- SDL_CondSignal(thread->context.c);
- }
+ return thread;
}
void init_threads(void)
{
/* Main thread is already initialized */
- if (running != &threads[0])
+ if (cores[CURRENT_CORE].running != &threads[0])
{
- THREAD_PANICF("Wrong main thread in init_threads: %p\n", running);
+ THREAD_PANICF("Wrong main thread in init_threads: %p\n",
+ cores[CURRENT_CORE].running);
}
THREAD_SDL_DEBUGF("First Thread: %d (%s)\n",
@@ -465,9 +501,9 @@ void init_threads(void)
void remove_thread(struct thread_entry *thread)
{
- struct thread_entry *current = running;
+ struct thread_entry *current = cores[CURRENT_CORE].running;
SDL_Thread *t;
- SDL_cond *c;
+ SDL_sem *s;
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
@@ -477,7 +513,7 @@ void remove_thread(struct thread_entry *thread)
}
t = thread->context.t;
- c = thread->context.c;
+ s = thread->context.s;
thread->context.t = NULL;
if (thread != current)
@@ -487,20 +523,25 @@ void remove_thread(struct thread_entry *thread)
case STATE_BLOCKED:
case STATE_BLOCKED_W_TMO:
/* Remove thread from object it's waiting on */
- remove_from_list_l(&thread->bqp->queue, thread);
+ remove_from_list_l(thread->bqp, thread);
+
+#ifdef HAVE_WAKEUP_EXT_CB
+ if (thread->wakeup_ext_cb != NULL)
+ thread->wakeup_ext_cb(thread);
+#endif
break;
}
- SDL_CondSignal(c);
+ SDL_SemPost(s);
}
THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n",
thread - threads, THREAD_SDL_GET_NAME(thread));
- thread_queue_wake_no_listlock(&thread->queue);
thread->state = STATE_KILLED;
+ thread_queue_wake(&thread->queue);
- SDL_DestroyCond(c);
+ SDL_DestroySemaphore(s);
if (thread == current)
{
@@ -514,14 +555,23 @@ void remove_thread(struct thread_entry *thread)
set_irq_level(oldlevel);
}
+void thread_exit(void)
+{
+ remove_thread(NULL);
+}
+
void thread_wait(struct thread_entry *thread)
{
+ struct thread_entry *current = cores[CURRENT_CORE].running;
+
if (thread == NULL)
- thread = running;
+ thread = current;
if (thread->state != STATE_KILLED)
{
- block_thread_no_listlock(&thread->queue);
+ current->bqp = &thread->queue;
+ block_thread(current);
+ switch_thread();
}
}
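
The shape thread_wait now takes is the general blocking idiom of the reworked API: the sleeper publishes the queue it waits on via bqp, marks itself blocked, and only then yields; the waker dequeues it, flips its state, and posts its semaphore. A condensed sketch (hypothetical caller code around the primitives patched in above; IRQ-level handling elided):

    static struct thread_entry *waiters = NULL;

    static void wait_for_data(void)
    {
        struct thread_entry *current = thread_get_current();
        current->bqp = &waiters;   /* queue to sleep on                  */
        block_thread(current);     /* STATE_BLOCKED + enqueue            */
        switch_thread();           /* SDL_SemWait until a waker posts us */
    }

    static void data_ready(void)
    {
        /* Dequeues one thread, sets STATE_RUNNING, SDL_SemPost()s it. */
        wakeup_thread(&waiters);
    }
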
diff --git a/uisimulator/sdl/uisdl.c b/uisimulator/sdl/uisdl.c
index e0a449e..0921092 100644
--- a/uisimulator/sdl/uisdl.c
+++ b/uisimulator/sdl/uisdl.c
@@ -40,19 +40,13 @@
#include "SDL_thread.h"
/* extern functions */
-extern void app_main (void *); /* mod entry point */
-extern void new_key(int key);
-extern void sim_tick_tasks(void);
-extern bool sim_io_init(void);
-extern void sim_io_shutdown(void);
+extern void new_key(int key);
void button_event(int key, bool pressed);
SDL_Surface *gui_surface;
bool background = false; /* Don't use backgrounds by default */
-SDL_TimerID tick_timer_id;
-
bool lcd_display_redraw = true; /* Used for player simulator */
char having_new_lcd = true; /* Used for player simulator */
bool sim_alarm_wakeup = false;
@@ -63,31 +57,6 @@ bool debug_audio = false;
bool debug_wps = false;
int wps_verbose_level = 3;
-long start_tick;
-
-Uint32 tick_timer(Uint32 interval, void *param)
-{
- long new_tick;
-
- (void) interval;
- (void) param;
-
- new_tick = (SDL_GetTicks() - start_tick) / (1000/HZ);
-
- if (new_tick != current_tick) {
- long i;
- for (i = new_tick - current_tick; i > 0; i--)
- {
- sim_enter_irq_handler();
- sim_tick_tasks();
- sim_exit_irq_handler();
- }
- current_tick = new_tick;
- }
-
- return 1;
-}
-
void gui_message_loop(void)
{
SDL_Event event;
@@ -181,8 +150,6 @@ bool gui_startup(void)
SDL_UpdateRect(gui_surface, 0, 0, 0, 0);
}
- start_tick = SDL_GetTicks();
-
return true;
}
@@ -191,7 +158,6 @@ bool gui_shutdown(void)
     /* Order here is relevant to prevent deadlocks and use of destroyed
        sync primitives by kernel threads */
thread_sdl_shutdown();
- SDL_RemoveTimer(tick_timer_id);
sim_kernel_shutdown();
return true;
}
@@ -287,8 +253,6 @@ int main(int argc, char *argv[])
return -1;
}
- tick_timer_id = SDL_AddTimer(10, tick_timer, NULL);
-
gui_message_loop();
return gui_shutdown();
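
The tick driver removed here does not disappear: per the diffstat, uisimulator/sdl/kernel-sdl.c is new and start_tick is now exported from system-sdl.h, so the callback presumably moves there. For orientation, the arithmetic in the deleted code: with HZ = 100, one kernel tick is 1000/HZ = 10 ms, and elapsed ticks are (SDL_GetTicks() - start_tick) / (1000/HZ). A sketch of the relocated driver under that assumption (this excerpt does not show kernel-sdl.c):

    static Uint32 tick_timer(Uint32 interval, void *param)
    {
        long new_tick = (SDL_GetTicks() - start_tick) / (1000 / HZ);

        while (current_tick < new_tick)
        {
            sim_enter_irq_handler();
            sim_tick_tasks();      /* run registered tick tasks */
            sim_exit_irq_handler();
            current_tick++;
        }

        (void)interval; (void)param;
        return 1;                  /* re-check again in 1 ms */
    }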