Message queues must be guarded on both ends, or else there is a race between detecting a message present and missing a wakeup on a thread about to wait. Keeping IRQs from interacting with the scheduler would be preferable, but this should do for the moment. Add more detailed panic info regarding blocking violations so we know who is involved. Make panicf function well enough on Gigabeat and PortalPlayer targets. Move the core sleep instructions into a CPU-specific inline to keep things organized.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@13374 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Michael Sevakis 2007-05-12 05:20:04 +00:00
parent 87c70db578
commit bfb281ff63
5 changed files with 124 additions and 68 deletions

View File

@ -211,6 +211,11 @@ static inline uint32_t swap_odd_even32(uint32_t value)
#endif /* !SIMULATOR */
/* Declare this as HIGHEST_IRQ_LEVEL if they don't differ */
#ifndef DISABLE_INTERRUPTS
#define DISABLE_INTERRUPTS HIGHEST_IRQ_LEVEL
#endif
/* Just define these as empty if not declared */
#ifndef HAVE_INVALIDATE_ICACHE
#define invalidate_icache()

View File

@ -131,10 +131,8 @@ struct core_entry {
volatile bool kernel_running;
#endif
long last_tick;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
int switch_to_irq_level;
#define STAY_IRQ_LEVEL -1
#endif
};
#ifdef HAVE_PRIORITY_SCHEDULING
@ -205,13 +203,9 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list);
void sleep_thread(int ticks);
void block_thread(struct thread_entry **thread);
void block_thread_w_tmo(struct thread_entry **thread, int timeout);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
void set_irq_level_and_block_thread(struct thread_entry **thread, int level);
#if 0
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
int timeout, int level)
#endif
#endif
int timeout, int level);
void wakeup_thread(struct thread_entry **thread);
void wakeup_thread_irq_safe(struct thread_entry **thread);
#ifdef HAVE_PRIORITY_SCHEDULING

View File

@ -100,8 +100,6 @@ void yield(void)
static void queue_fetch_sender(struct queue_sender_list *send,
unsigned int i)
{
/* Disable interrupts to protect against collision in this slot */
int old_level = set_irq_level(HIGHEST_IRQ_LEVEL);
struct thread_entry **spp = &send->senders[i];
if (*spp)
@ -109,8 +107,6 @@ static void queue_fetch_sender(struct queue_sender_list *send,
send->curr_sender = *spp;
*spp = NULL;
}
set_irq_level(old_level);
}
/* Puts the specified return value in the waiting thread's return value
@ -208,8 +204,18 @@ void queue_delete(struct event_queue *q)
int i;
bool found = false;
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
/* Release theads waiting on queue */
wakeup_thread(&q->thread);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Release waiting threads and reply to any dequeued message
waiting for one. */
queue_release_all_senders(q);
queue_reply(q, 0);
#endif
/* Find the queue to be deleted */
for(i = 0;i < num_queues;i++)
@ -223,14 +229,6 @@ void queue_delete(struct event_queue *q)
if(found)
{
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Release waiting threads and reply to any dequeued message
waiting for one. */
int level = set_irq_level(HIGHEST_IRQ_LEVEL);
queue_release_all_senders(q);
queue_reply(q, NULL);
set_irq_level(level);
#endif
/* Move the following queues up in the list */
for(;i < num_queues-1;i++)
{
@ -241,17 +239,21 @@ void queue_delete(struct event_queue *q)
}
unlock_cores();
set_irq_level(oldlevel);
}
void queue_wait(struct event_queue *q, struct event *ev)
{
int oldlevel;
unsigned int rd;
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
if (q->read == q->write)
{
block_thread(&q->thread);
set_irq_level_and_block_thread(&q->thread, oldlevel);
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
}
@ -267,15 +269,18 @@ void queue_wait(struct event_queue *q, struct event *ev)
#endif
unlock_cores();
set_irq_level(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
if (q->read == q->write && ticks > 0)
{
block_thread_w_tmo(&q->thread, ticks);
set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
}
@ -298,6 +303,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
}
unlock_cores();
set_irq_level(oldlevel);
}
void queue_post(struct event_queue *q, long id, intptr_t data)
@ -388,14 +394,10 @@ bool queue_in_queue_send(struct event_queue *q)
void queue_reply(struct event_queue *q, intptr_t retval)
{
lock_cores();
/* No IRQ lock here since IRQs cannot change this */
if(q->send && q->send->curr_sender)
{
int level = set_irq_level(HIGHEST_IRQ_LEVEL);
if(q->send->curr_sender)
{
queue_release_sender(&q->send->curr_sender, retval);
}
set_irq_level(level);
queue_release_sender(&q->send->curr_sender, retval);
}
unlock_cores();
}

View File

@ -45,13 +45,13 @@ void panicf( const char *fmt, ...)
#endif
/* Disable interrupts */
#if CONFIG_CPU == SH7034
asm volatile ("ldc\t%0,sr" : : "r"(15<<4));
#elif defined(CPU_COLDFIRE)
asm volatile ("move.w #0x2700,%sr");
#endif
#ifdef CPU_ARM
disable_fiq();
#endif
set_irq_level(DISABLE_INTERRUPTS);
#endif /* SIMULATOR */
va_start( ap, fmt );
vsnprintf( panic_buf, sizeof(panic_buf), fmt, ap );
va_end( ap );
@ -99,7 +99,16 @@ void panicf( const char *fmt, ...)
#endif
/* try to restart firmware if ON is pressed */
#ifdef IRIVER_H100_SERIES
#if defined (CPU_PP)
/* For now, just sleep the core */
if (CURRENT_CORE == CPU)
CPU_CTL = PROC_SLEEP;
else
COP_CTL = PROC_SLEEP;
#define system_reboot() nop
#elif defined (TOSHIBA_GIGABEAT_F)
if ((GPGDAT & (1 << 0)) != 0)
#elif defined (IRIVER_H100_SERIES)
if ((GPIO1_READ & 0x22) == 0) /* check for ON button and !hold */
#elif defined(IRIVER_H300_SERIES)
if ((GPIO1_READ & 0x22) == 0) /* check for ON button and !hold */

View File

@ -20,6 +20,7 @@
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "sprintf.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"
@ -71,7 +72,7 @@ static long cores_locked IBSS_ATTR;
#define UNLOCK(...) cores_locked = 0
#endif
#warning "Core locking mechanism should be fixed on H10/4G!"
/* #warning "Core locking mechanism should be fixed on H10/4G!" */
inline void lock_cores(void)
{
@ -110,6 +111,7 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr)
__attribute__ ((always_inline));
static inline void core_sleep(void) __attribute__((always_inline));
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
@ -173,6 +175,30 @@ static inline void load_context(const void* addr)
);
}
#if defined (CPU_PP)
/* Put the current PortalPlayer core into its low-power sleep state until
   an interrupt arrives. The core lock is dropped around the sleep so the
   other core is not stalled while this one is idle. */
static inline void core_sleep(void)
{
/* Release the core lock before sleeping; reacquired on wakeup below. */
unlock_cores();
/* This should sleep the CPU. It appears to wake by itself on
interrupts */
if (CURRENT_CORE == CPU)
CPU_CTL = PROC_SLEEP;
else
COP_CTL = PROC_SLEEP;
/* An interrupt woke us - take the core lock back before returning. */
lock_cores();
}
#elif CONFIG_CPU == S3C2440
/* Idle the S3C2440 core until an interrupt occurs by toggling the IDLE
   bit in the clock control register. */
static inline void core_sleep(void)
{
int i;
CLKCON |= (1 << 2); /* set IDLE bit */
/* NOTE(review): short busy loop, presumably to let the core actually
   enter IDLE before the bit is cleared - confirm against datasheet. */
for(i=0; i<10; i++); /* wait for IDLE */
CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
}
#endif
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
* Store non-volatile context.
@ -206,6 +232,11 @@ static inline void load_context(const void* addr)
);
}
/* Halt the ColdFire core in low-power stop mode until an interrupt.
   The immediate 0x2000 is loaded into SR (supervisor mode, interrupt
   mask 0) so any interrupt can wake the core. */
static inline void core_sleep(void)
{
asm volatile ("stop #0x2000");
}
/* Set EMAC unit to fractional mode with saturation for each new thread,
since that's what'll be the most useful for most things which the dsp
will do. Codecs should still initialize their preferred modes
@ -263,6 +294,12 @@ static inline void load_context(const void* addr)
);
}
/* Put the SH7034 core to sleep until an interrupt. Clearing the high bit
   of SBYCR first selects sleep mode rather than software standby for the
   subsequent "sleep" instruction. */
static inline void core_sleep(void)
{
and_b(0x7F, &SBYCR);
asm volatile ("sleep");
}
#endif
#ifndef THREAD_CPU_INIT
@ -270,6 +307,40 @@ static inline void load_context(const void* addr)
#define THREAD_CPU_INIT(core, thread)
#endif
#ifdef THREAD_EXTRA_CHECKS
/* Format a short identifier for 'thread' into 'buffer' (at most 16 bytes
 * including the terminator), prefixed with a space: the thread's name when
 * it has one, otherwise its address as a zero-padded hex ID. A NULL thread
 * yields an empty string. */
static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
{
    buffer[0] = '\0';

    if (thread == NULL)
        return;

    /* Display thread name if one or ID if none */
    if (thread->name != NULL)
        snprintf(buffer, 16, " %s", thread->name);
    else
        snprintf(buffer, 16, " %08lX", (intptr_t)thread);
}
/* Panic with 'msg' followed by identifiers for up to two threads involved
 * in the violation; either thread pointer may be NULL and then contributes
 * nothing to the message.
 * NOTE(review): buffers are static, presumably to avoid touching a possibly
 * overflowed stack on the way into panicf - confirm intent. */
static void thread_panicf(const char *msg,
    struct thread_entry *thread1, struct thread_entry *thread2)
{
    static char name1[16];
    static char name2[16];

    thread_panicf_format_name(name1, thread1);
    thread_panicf_format_name(name2, thread2);

    panicf("%s%s%s", msg, name1, name2);
}
#else
/* Panic reporting a stack overflow on the currently running thread,
 * identified by its name when it has one or by its address otherwise. */
static void thread_stkov(void)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;

    /* Display thread name if one or ID if none */
    if (current->name != NULL)
        panicf("%s %s", "Stkov", current->name);
    else
        panicf("%s %08lX", "Stkov", (intptr_t)current);
}
#endif /* THREAD_EXTRA_CHECKS */
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
{
if (*list == NULL)
@ -392,10 +463,6 @@ static void wake_list_awaken(void)
static inline void sleep_core(void)
{
#if CONFIG_CPU == S3C2440
int i;
#endif
for (;;)
{
/* We want to do these ASAP as it may change the decision to sleep
@ -416,28 +483,7 @@ static inline void sleep_core(void)
break;
/* Enter sleep mode to reduce power usage, woken up on interrupt */
#ifdef CPU_COLDFIRE
asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
and_b(0x7F, &SBYCR);
asm volatile ("sleep");
#elif defined (CPU_PP)
unlock_cores();
/* This should sleep the CPU. It appears to wake by itself on
interrupts */
if (CURRENT_CORE == CPU)
CPU_CTL = PROC_SLEEP;
else
COP_CTL = PROC_SLEEP;
lock_cores();
#elif CONFIG_CPU == S3C2440
CLKCON |= (1 << 2); /* set IDLE bit */
for(i=0; i<10; i++); /* wait for IDLE */
CLKCON &= ~(1 << 2); /* reset IDLE bit when wake up */
#endif
core_sleep();
}
}
@ -521,12 +567,15 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
/* Check if the current thread stack is overflown */
stackptr = cores[CURRENT_CORE].running->stack;
if(stackptr[0] != DEADBEEF)
panicf("Stkov %s", cores[CURRENT_CORE].running->name);
#ifdef THREAD_EXTRA_CHECKS
thread_panicf("Stkov", cores[CURRENT_CORE].running, NULL);
#else
thread_stkov();
#endif
/* Rearrange thread lists as needed */
change_thread_state(blocked_list);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* This has to be done after the scheduler is finished with the
blocked_list pointer so that an IRQ can't kill us by attempting
a wake but before attempting any core sleep. */
@ -536,7 +585,6 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
cores[CURRENT_CORE].switch_to_irq_level = STAY_IRQ_LEVEL;
set_irq_level(level);
}
#endif
}
/* Go through the list of sleeping task to check if we need to wake up
@ -626,7 +674,7 @@ void block_thread(struct thread_entry **list)
#ifdef THREAD_EXTRA_CHECKS
/* We are not allowed to mix blocking types in one queue. */
if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
panicf("Blocking violation B->*T");
thread_panicf("Blocking violation B->*T", current, *list);
#endif
/* Set the state to blocked and ask the scheduler to switch tasks,
@ -669,7 +717,7 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
/* We can store only one thread to the "list" if thread is used
* in other list (such as core's list for sleeping tasks). */
if (*list)
panicf("Blocking violation T->*B");
thread_panicf("Blocking violation T->*B", current, NULL);
#endif
/* Set the state to blocked with the specified timeout */
@ -686,14 +734,13 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
*list = NULL;
}
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) && !defined(SIMULATOR)
#if !defined(SIMULATOR)
/* Block the current thread on 'list', arranging for the scheduler to set
   the IRQ level to 'level' once it is done with the blocked-list pointer.
   The level must be recorded BEFORE blocking so the thread switch sees it. */
void set_irq_level_and_block_thread(struct thread_entry **list, int level)
{
cores[CURRENT_CORE].switch_to_irq_level = level;
block_thread(list);
}
#if 0
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
int timeout, int level)
{
@ -701,7 +748,6 @@ void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
block_thread_w_tmo(list, timeout);
}
#endif
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
void wakeup_thread(struct thread_entry **list)
{