kernel: Break out kernel primitives into separate files and move to separate dir.

No code changed, just shuffling stuff around. This should make it easier to
build only select parts of the kernel and use different implementations.

Change-Id: Ie1f00f93008833ce38419d760afd70062c5e22b5
Thomas Martitz 2013-12-04 17:06:17 +01:00
parent 8bae5f2644
commit 382d1861af
30 changed files with 1564 additions and 756 deletions
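
For orientation, the layout this commit creates, pieced together from the file
hunks below (not exhaustive; a few headers such as thread-internal.h are
referenced in includes but their hunks are not shown here):

    firmware/kernel/
        corelock.c  mutex.c  queue.c  semaphore.c  thread.c  tick.c  timeout.c
        include/
            corelock.h  kernel.h  kernel-internal.h  mutex.h  queue.h
            semaphore.h  thread.h  tick.h  timeout.h
    firmware/asm/
        corelock.c          (portable C fallback, Peterson's algorithm)
        arm/corelock.c      (hand-tuned ARM version)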

View File

@ -18,7 +18,7 @@
* {,U}INT{8,16,32,64}_{MIN,MAX} */
#include "system.h"
/* HZ, TIME_AFTER */
/* HZ, TIME_AFTER, current_tick */
#include "kernel.h"
/* Structure to record some info during processing call */

View File

@ -29,7 +29,6 @@ usb.c
#if defined(ROCKBOX_HAS_LOGF) || defined(ROCKBOX_HAS_LOGDISKF)
logf.c
#endif /* ROCKBOX_HAS_LOGF */
kernel.c
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
load_code.c
#ifdef RB_PROFILE
@ -41,7 +40,6 @@ common/rb-loader.c
#if !defined(BOOTLOADER) || defined(CPU_SH)
rolo.c
#endif /* !defined(BOOTLOADER) || defined(CPU_SH) */
thread.c
timer.c
debug.c
#endif /* PLATFORM_NATIVE */
@ -63,7 +61,6 @@ target/hosted/sdl/system-sdl.c
#ifdef HAVE_SDL_THREADS
target/hosted/sdl/thread-sdl.c
#else
thread.c
#endif
target/hosted/sdl/timer-sdl.c
#ifdef HAVE_TOUCHSCREEN
@ -78,7 +75,6 @@ target/hosted/sdl/app/button-application.c
target/hosted/kernel-unix.c
target/hosted/filesystem-unix.c
target/hosted/lc-unix.c
thread.c
drivers/lcd-memframe.c
target/hosted/samsungypr/lcd-ypr.c
target/hosted/samsungypr/gpio-ypr.c
@ -1813,7 +1809,6 @@ target/hosted/android/telephony-android.c
target/hosted/android/app/button-application.c
#endif
drivers/audio/android.c
thread.c
#endif
#endif /* defined(SIMULATOR) */
@ -1821,3 +1816,22 @@ thread.c
#if defined(HAVE_TOUCHPAD) && !defined(HAS_BUTTON_HOLD)
drivers/touchpad.c
#endif
/* firmware/kernel section */
#ifdef HAVE_CORELOCK_OBJECT
kernel/corelock.c
#endif
kernel/mutex.c
kernel/queue.c
#ifdef HAVE_SEMAPHORE_OBJECTS
kernel/semaphore.c
#endif
#if defined(HAVE_SDL_THREADS)
target/hosted/sdl/thread-sdl.c
#else
kernel/thread.c
#endif
kernel/tick.c
#ifdef INCLUDE_TIMEOUT_API
kernel/timeout.c
#endif

View File

@ -0,0 +1,96 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2007 by Daniel Ankers
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
/* Core locks using Peterson's mutual exclusion algorithm.
* ASM optimized version of C code, see firmware/asm/corelock.c */
#include "cpu.h"
/*---------------------------------------------------------------------------
* Wait for the corelock to become free and acquire it when it does.
*---------------------------------------------------------------------------
*/
void __attribute__((naked)) corelock_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
"strb r2, [r0, #2] \n" /* cl->turn = othercore */
"1: \n"
"ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
"cmp r3, #0 \n" /* yes? lock acquired */
"bxeq lr \n"
"ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
"cmp r3, r1 \n"
"bxeq lr \n" /* yes? lock acquired */
"b 1b \n" /* keep trying */
: : "i"(&PROCESSOR_ID)
);
(void)cl;
}
/*---------------------------------------------------------------------------
* Try to acquire the corelock. If free, caller gets it, otherwise return 0.
*---------------------------------------------------------------------------
*/
int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"mov r3, r0 \n"
"strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
"strb r2, [r0, #2] \n" /* cl->turn = othercore */
"ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
"eors r0, r0, r2 \n" /* yes? lock acquired */
"bxne lr \n"
"ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
"ands r0, r0, r1 \n"
"streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
"bx lr \n" /* return result */
: : "i"(&PROCESSOR_ID)
);
return 0;
(void)cl;
}
/*---------------------------------------------------------------------------
* Release ownership of the corelock
*---------------------------------------------------------------------------
*/
void __attribute__((naked)) corelock_unlock(struct corelock *cl)
{
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"mov r2, #0 \n" /* cl->myl[core] = 0 */
"strb r2, [r0, r1, lsr #7] \n"
"bx lr \n"
: : "i"(&PROCESSOR_ID)
);
(void)cl;
}
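
The addressing trick above deserves a note: PROCESSOR_ID reads back as one of
the two complementary ID bytes, and a 7-bit right shift turns that raw ID into
the myl[] index, so the code never needs a separate core number:

    0x55 >> 7 == 0,    0xaa >> 7 == 1
    /* so "ldrb/strb ..., [r0, rX, lsr #7]" addresses cl->myl[core]
     * directly, and "eor rX, rY, #0xff" flips 0x55 <-> 0xaa to form
     * the other core's ID */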

67 firmware/asm/corelock.c Normal file
View File

@ -0,0 +1,67 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2007 by Daniel Ankers
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "corelock.h"
/* Core locks using Peterson's mutual exclusion algorithm. */
#ifdef CPU_ARM
#include "arm/corelock.c"
#else
void corelock_lock(struct corelock *cl)
{
const unsigned int core = CURRENT_CORE;
const unsigned int othercore = 1 - core;
cl->myl[core] = core;
cl->turn = othercore;
for (;;)
{
if (cl->myl[othercore] == 0 || cl->turn == core)
break;
}
}
int corelock_try_lock(struct corelock *cl)
{
const unsigned int core = CURRENT_CORE;
const unsigned int othercore = 1 - core;
cl->myl[core] = core;
cl->turn = othercore;
if (cl->myl[othercore] == 0 || cl->turn == core)
{
return 1;
}
cl->myl[core] = 0;
return 0;
}
void corelock_unlock(struct corelock *cl)
{
cl->myl[CURRENT_CORE] = 0;
}
#endif

View File

@ -23,6 +23,7 @@
****************************************************************************/
#include "cpu.h"
#include "kernel.h"
#include "debug.h"
#include "system.h"
#include "kernel.h"

View File

@ -24,7 +24,6 @@
#include <stdbool.h>
#include <stdint.h>
#include "cpu.h"
#include "gcc_extensions.h" /* for LIKELY/UNLIKELY */
@ -86,6 +85,10 @@ int get_cpu_boost_counter(void);
#define BAUDRATE 9600
/* wrap-safe macros for tick comparison */
#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
#define TIME_BEFORE(a,b) TIME_AFTER(b,a)
#ifndef NULL
#define NULL ((void*)0)
#endif
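
A worked case showing why the signed difference is wrap-safe where a plain
comparison is not (sketch; assumes 32-bit long with the usual two's-complement
wraparound):

    long deadline = LONG_MAX;      /* expires just before the tick counter wraps */
    long now      = LONG_MIN + 9;  /* 10 ticks later, after the wrap             */
    /* now > deadline              -> false: wrongly says "not yet"              */
    /* TIME_AFTER(now, deadline)   -> (long)(deadline) - (long)(now) == -10 < 0  */
    /*                                true: correctly says the deadline passed   */

The idiom holds whenever the two ticks are less than half the range of long
apart.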

View File

@ -7,7 +7,8 @@
# $Id$
#
INCLUDES += -I$(FIRMDIR) -I$(FIRMDIR)/export -I$(FIRMDIR)/drivers -I$(FIRMDIR)/include
INCLUDES += -I$(FIRMDIR) -I$(FIRMDIR)/export -I$(FIRMDIR)/drivers \
-I$(FIRMDIR)/include -I$(FIRMDIR)/kernel/include
ifndef APP_TYPE
INCLUDES += -I$(FIRMDIR)/libc/include
endif

View File

@ -20,8 +20,6 @@
****************************************************************************/
#include <stdio.h>
#include "config.h"
#include "system.h"
#include "kernel.h"
#include "general.h"
#include "file.h"

View File

@ -0,0 +1,40 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2007 by Daniel Ankers
*
* PP5002 and PP502x SoC threading support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include <string.h>
#include "corelock.h"
/* Core locks using Peterson's mutual exclusion algorithm */
/*---------------------------------------------------------------------------
* Initialize the corelock structure.
*---------------------------------------------------------------------------
*/
void corelock_init(struct corelock *cl)
{
memset(cl, 0, sizeof (*cl));
}
/* other corelock methods are ASM-optimized */
#include "asm/corelock.c"

View File

@ -0,0 +1,53 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Ulf Ralberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef CORELOCK_H
#define CORELOCK_H
#include "config.h"
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#else
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
volatile unsigned char myl[NUM_CORES];
volatile unsigned char turn;
} __attribute__((packed));
/* Too big to inline everywhere */
extern void corelock_init(struct corelock *cl);
extern void corelock_lock(struct corelock *cl);
extern int corelock_try_lock(struct corelock *cl);
extern void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CORELOCK_H */
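
A minimal usage sketch (names hypothetical): serializing access to state shared
between two cores. On targets without HAVE_CORELOCK_OBJECT the calls expand to
nothing via the macros above, so the same code builds everywhere:

    static struct corelock counter_cl;   /* hypothetical shared object */
    static int shared_counter;

    void counter_init(void)
    {
        corelock_init(&counter_cl);
    }

    void counter_bump(void)
    {
        corelock_lock(&counter_cl);      /* spins until the other core leaves */
        shared_counter++;
        corelock_unlock(&counter_cl);
    }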

View File

@ -0,0 +1,69 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef KERNEL_H
#define KERNEL_H
#include "config.h"
#include "system.h"
#include "queue.h"
#include "mutex.h"
#include "tick.h"
#ifdef INCLUDE_TIMEOUT_API
#include "timeout.h"
#endif
#ifdef HAVE_SEMAPHORE_OBJECTS
#include "semaphore.h"
#endif
#ifdef HAVE_CORELOCK_OBJECT
#include "corelock.h"
#endif
#define OBJ_WAIT_TIMEDOUT (-1)
#define OBJ_WAIT_FAILED 0
#define OBJ_WAIT_SUCCEEDED 1
#define TIMEOUT_BLOCK -1
#define TIMEOUT_NOBLOCK 0
static inline void kernel_init(void)
{
/* Init the threading API */
init_threads();
/* Other processors will not reach this point in a multicore build.
* In a single-core build with multiple cores they fall-through and
* sleep in cop_main without returning. */
if (CURRENT_CORE == CPU)
{
init_queues();
init_tick();
#ifdef KDEV_INIT
kernel_device_init();
#endif
}
}
#endif /* KERNEL_H */

View File

@ -0,0 +1,62 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef MUTEX_H
#define MUTEX_H
#include <stdbool.h>
#include "config.h"
#include "thread.h"
struct mutex
{
struct thread_entry *queue; /* waiter list */
int recursion; /* lock owner recursion count */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker blocker; /* priority inheritance info
for waiters */
bool no_preempt; /* don't allow higher-priority thread
to be scheduled even if woken */
#else
struct thread_entry *thread; /* Indicates owner thread - an owner
implies a locked state - same goes
for priority scheduling
(in blocker struct for that) */
#endif
IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
extern void mutex_init(struct mutex *m);
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Deprecated temporary function to disable mutex preempting a thread on
* unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c -
* reliance on it is a bug! */
static inline void mutex_set_preempt(struct mutex *m, bool preempt)
{ m->no_preempt = !preempt; }
#else
/* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
static inline bool mutex_test(const struct mutex *m)
{ return m->thread != NULL; }
#endif /* HAVE_PRIORITY_SCHEDULING */
#endif /* MUTEX_H */
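
A minimal usage sketch (names hypothetical). Two properties from the
implementation are worth knowing: the lock is recursive for its owner, and
only the owning thread may unlock:

    static struct mutex list_mtx;        /* hypothetical */

    void list_setup(void)
    {
        mutex_init(&list_mtx);           /* once, before other threads see it */
    }

    void list_update(void)
    {
        mutex_lock(&list_mtx);           /* blocks; re-locking by the owner
                                            just bumps the recursion count */
        /* ... modify the shared list ... */
        mutex_unlock(&list_mtx);
    }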

View File

@ -18,27 +18,14 @@
* KIND, either express or implied.
*
****************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
#include <stdbool.h>
#include <inttypes.h>
#ifndef QUEUE_H
#define QUEUE_H
#include <stdint.h>
#include "config.h"
#include "thread.h"
/* wrap-safe macros for tick comparison */
#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
#define TIME_BEFORE(a,b) TIME_AFTER(b,a)
#define HZ 100 /* number of ticks per second */
#define MAX_NUM_TICK_TASKS 8
#define MAX_NUM_QUEUES 32
#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
/* System defined message ID's - |sign bit = 1|class|id| */
/* Event class list */
#define SYS_EVENT_CLS_QUEUE 0
@ -85,10 +72,9 @@
#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)
#ifndef TIMEOUT_BLOCK
#define TIMEOUT_BLOCK -1
#define TIMEOUT_NOBLOCK 0
#endif
#define MAX_NUM_QUEUES 32
#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
struct queue_event
{
@ -137,100 +123,6 @@ struct event_queue
IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
struct mutex
{
struct thread_entry *queue; /* waiter list */
int recursion; /* lock owner recursion count */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker blocker; /* priority inheritance info
for waiters */
bool no_preempt; /* don't allow higher-priority thread
to be scheduled even if woken */
#else
struct thread_entry *thread; /* Indicates owner thread - an owner
implies a locked state - same goes
for priority scheduling
(in blocker struct for that) */
#endif
IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#ifdef HAVE_SEMAPHORE_OBJECTS
struct semaphore
{
struct thread_entry *queue; /* Waiter list */
int volatile count; /* # of waits remaining before unsignaled */
int max; /* maximum # of waits to remain signaled */
IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#endif
/* global tick variable */
#if defined(CPU_PP) && defined(BOOTLOADER) && \
!defined(HAVE_BOOTLOADER_USB_MODE)
/* We don't enable interrupts in the PP bootloader unless USB mode is
enabled for it, so we need to fake the current_tick variable */
#define current_tick (signed)(USEC_TIMER/10000)
static inline void call_tick_tasks(void)
{
}
#else
extern volatile long current_tick;
/* inline helper for implementing target interrupt handler */
static inline void call_tick_tasks(void)
{
extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
void (**p)(void) = tick_funcs;
void (*fn)(void);
current_tick++;
for(fn = *p; fn != NULL; fn = *(++p))
{
fn();
}
}
#endif
/* kernel functions */
extern void kernel_init(void) INIT_ATTR;
extern void yield(void);
extern unsigned sleep(unsigned ticks);
int tick_add_task(void (*f)(void));
int tick_remove_task(void (*f)(void));
extern void tick_start(unsigned int interval_in_ms) INIT_ATTR;
#ifdef INCLUDE_TIMEOUT_API
struct timeout;
/* timeout callback type
* tmo - pointer to struct timeout associated with event
* return next interval or <= 0 to stop event
*/
#define MAX_NUM_TIMEOUTS 8
typedef int (* timeout_cb_type)(struct timeout *tmo);
struct timeout
{
timeout_cb_type callback;/* callback - returning false cancels */
intptr_t data; /* data passed to callback */
long expires; /* expiration tick */
};
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
int ticks, intptr_t data);
void timeout_cancel(struct timeout *tmo);
#endif /* INCLUDE_TIMEOUT_API */
#define STATE_NONSIGNALED 0
#define STATE_SIGNALED 1
#define OBJ_WAIT_TIMEDOUT (-1)
#define OBJ_WAIT_FAILED 0
#define OBJ_WAIT_SUCCEEDED 1
extern void queue_init(struct event_queue *q, bool register_queue);
extern void queue_delete(struct event_queue *q);
extern void queue_wait(struct event_queue *q, struct queue_event *ev);
@ -260,26 +152,6 @@ extern void queue_clear(struct event_queue* q);
extern void queue_remove_from_head(struct event_queue *q, long id);
extern int queue_count(const struct event_queue *q);
extern int queue_broadcast(long id, intptr_t data);
extern void init_queues(void);
extern void mutex_init(struct mutex *m);
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Deprecated temporary function to disable mutex preempting a thread on
* unlock - firmware/drivers/fat.c and a couple places in apps/buffering.c -
* reliance on it is a bug! */
static inline void mutex_set_preempt(struct mutex *m, bool preempt)
{ m->no_preempt = !preempt; }
#else
/* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
static inline bool mutex_test(const struct mutex *m)
{ return m->thread != NULL; }
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_SEMAPHORE_OBJECTS
extern void semaphore_init(struct semaphore *s, int max, int start);
extern int semaphore_wait(struct semaphore *s, int timeout);
extern void semaphore_release(struct semaphore *s);
#endif /* HAVE_SEMAPHORE_OBJECTS */
#endif /* _KERNEL_H_ */
#endif /* QUEUE_H */
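
A consumer-side sketch (names hypothetical; assumes the conventional
queue_event fields id and data). Registering the queue makes it reachable by
queue_broadcast(); point-to-point senders would use the post functions from
the rest of the queue API:

    static struct event_queue ui_queue;  /* hypothetical */
    #define EV_REDRAW 1                  /* hypothetical event id */

    static void ui_thread(void)
    {
        struct queue_event ev;
        queue_init(&ui_queue, true);     /* register for broadcasts */
        while(1)
        {
            queue_wait(&ui_queue, &ev);  /* block until an event arrives */
            switch(ev.id)
            {
            case EV_REDRAW:
                /* ... redraw using ev.data ... */
                break;
            default:                     /* SYS_* events also land here */
                break;
            }
        }
    }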

View File

@ -0,0 +1,40 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef SEMAPHORE_H
#define SEMAPHORE_H
#include "config.h"
#include "thread.h"
struct semaphore
{
struct thread_entry *queue; /* Waiter list */
int volatile count; /* # of waits remaining before unsignaled */
int max; /* maximum # of waits to remain signaled */
IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
extern void semaphore_init(struct semaphore *s, int max, int start);
extern int semaphore_wait(struct semaphore *s, int timeout);
extern void semaphore_release(struct semaphore *s);
#endif /* SEMAPHORE_H */
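
A typical pattern, sketched with hypothetical names: an interrupt handler
signalling a worker thread. semaphore_release() is ISR-safe and saturates at
max; TIMEOUT_BLOCK and OBJ_WAIT_SUCCEEDED come from kernel.h:

    static struct semaphore data_ready;

    void dma_init(void)
    {
        semaphore_init(&data_ready, 1, 0);  /* at most 1 pending, start empty */
    }

    void dma_isr(void)                      /* interrupt context */
    {
        semaphore_release(&data_ready);     /* wake the worker; releases
                                               beyond max are absorbed */
    }

    void dma_thread(void)
    {
        while(semaphore_wait(&data_ready, TIMEOUT_BLOCK) == OBJ_WAIT_SUCCEEDED)
        {
            /* ... drain the completed buffer ... */
        }
    }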

View File

@ -27,6 +27,7 @@
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"
#include "corelock.h"
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
* by giving high priority threads more CPU time than lower priority threads
@ -63,6 +64,7 @@
#define IO_PRIORITY_IMMEDIATE 0
#define IO_PRIORITY_BACKGROUND 32
#if CONFIG_CODEC == SWCODEC
# ifdef HAVE_HARDWARE_CLICK
# define BASETHREADS 17
@ -78,7 +80,6 @@
#endif
#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
/*
* We need more stack when we run under a host
* maybe more expensive C lib functions?
@ -99,23 +100,6 @@ struct regs
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS */
#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
volatile unsigned char myl[NUM_CORES];
volatile unsigned char turn;
} __attribute__((packed));
/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
/* NOTE: The use of the word "queue" may also refer to a linked list of
threads being maintained that are normally dealt with in FIFO order
and not necessarily kernel event_queue */
@ -150,14 +134,6 @@ struct thread_list
struct thread_entry *next; /* Next thread in a list */
};
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
@ -307,6 +283,9 @@ struct core_entry
#endif /* NUM_CORES */
};
extern void yield(void);
extern unsigned sleep(unsigned ticks);
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#define IFN_PRIO(...)

View File

@ -0,0 +1,67 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef TICK_H
#define TICK_H
#include "config.h"
#include "system.h" /* for NULL */
extern void init_tick(void);
#define HZ 100 /* number of ticks per second */
#define MAX_NUM_TICK_TASKS 8
/* global tick variable */
#if defined(CPU_PP) && defined(BOOTLOADER) && \
!defined(HAVE_BOOTLOADER_USB_MODE)
/* We don't enable interrupts in the PP bootloader unless USB mode is
enabled for it, so we need to fake the current_tick variable */
#define current_tick (signed)(USEC_TIMER/10000)
static inline void call_tick_tasks(void)
{
}
#else
extern volatile long current_tick;
/* inline helper for implementing target interrupt handler */
static inline void call_tick_tasks(void)
{
extern void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
void (**p)(void) = tick_funcs;
void (*fn)(void);
current_tick++;
for(fn = *p; fn != NULL; fn = *(++p))
{
fn();
}
}
#endif
/* implemented in target tree */
extern void tick_start(unsigned int interval_in_ms) INIT_ATTR;
extern int tick_add_task(void (*f)(void));
extern int tick_remove_task(void (*f)(void));
#endif /* TICK_H */
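
A sketch of a tick task (hypothetical names). Tick tasks run from the timer
interrupt via call_tick_tasks(), so they must be short and must not block:

    static void heartbeat_tick(void)
    {
        if(current_tick % HZ == 0)       /* once per second */
        {
            /* ... toggle an LED, kick a watchdog, ... */
        }
    }

    void heartbeat_init(void)
    {
        tick_add_task(heartbeat_tick);   /* panics if all
                                            MAX_NUM_TICK_TASKS slots are taken */
    }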

View File

@ -0,0 +1,46 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef TIMEOUT_H
#define TIMEOUT_H
#include <stdint.h> /* intptr_t */
#include "config.h"
struct timeout;
/* timeout callback type
* tmo - pointer to struct timeout associated with event
* return next interval or <= 0 to stop event
*/
#define MAX_NUM_TIMEOUTS 8
typedef int (* timeout_cb_type)(struct timeout *tmo);
struct timeout
{
timeout_cb_type callback;/* callback - returning false cancels */
intptr_t data; /* data passed to callback */
long expires; /* expiration tick */
};
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
int ticks, intptr_t data);
void timeout_cancel(struct timeout *tmo);
#endif /* TIMEOUT_H */
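
A one-shot sketch (hypothetical names; HZ comes from tick.h). Re-registering
an active timeout simply resets its interval, which makes this a natural
debounce; returning a positive tick count from the callback would re-arm it
instead:

    static struct timeout debounce_tmo;

    static int debounce_cb(struct timeout *tmo)
    {
        /* ... act on tmo->data ... */
        return 0;                        /* <= 0: stop, don't re-arm */
    }

    void on_button_edge(intptr_t btn)
    {
        timeout_register(&debounce_tmo, debounce_cb, HZ/2, btn);
    }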

View File

@ -0,0 +1,49 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Ulf Ralberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef KERNEL_INTERNAL_H
#define KERNEL_INTERNAL_H
#include "config.h"
#include "debug.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif
#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#include <stdlib.h>
#define KERNEL_ASSERT(exp, msg...) \
({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#endif /* KERNEL_INTERNAL_H */

152 firmware/kernel/mutex.c Normal file
View File

@ -0,0 +1,152 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
/****************************************************************************
* Simple mutex functions ;)
****************************************************************************/
#include <stdbool.h>
#include "config.h"
#include "system.h"
#include "mutex.h"
#include "corelock.h"
#include "thread-internal.h"
#include "kernel-internal.h"
static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
{
#ifdef HAVE_PRIORITY_SCHEDULING
mtx->blocker.thread = td;
#else
mtx->thread = td;
#endif
}
static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
return mtx->blocker.thread;
#else
return mtx->thread;
#endif
}
/* Initialize a mutex object - call before any use and do not call again once
* the object is available to other threads */
void mutex_init(struct mutex *m)
{
corelock_init(&m->cl);
m->queue = NULL;
m->recursion = 0;
mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
m->blocker.priority = PRIORITY_IDLE;
m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
struct thread_entry *current = thread_self_entry();
if(current == mutex_get_thread(m))
{
/* current thread already owns this mutex */
m->recursion++;
return;
}
/* lock out other cores */
corelock_lock(&m->cl);
/* must read thread again inside cs (a multiprocessor concern really) */
if(LIKELY(mutex_get_thread(m) == NULL))
{
/* lock is open */
mutex_set_thread(m, current);
corelock_unlock(&m->cl);
return;
}
/* block until the lock is open... */
IF_COP( current->obj_cl = &m->cl; )
IF_PRIO( current->blocker = &m->blocker; )
current->bqp = &m->queue;
disable_irq();
block_thread(current);
corelock_unlock(&m->cl);
/* ...and turn control over to next thread */
switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
/* unlocker not being the owner is an unlocking violation */
KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
"mutex_unlock->wrong thread (%s != %s)\n",
mutex_get_thread(m)->name,
thread_self_entry()->name);
if(m->recursion > 0)
{
/* this thread still owns lock */
m->recursion--;
return;
}
/* lock out other cores */
corelock_lock(&m->cl);
/* transfer to next queued thread if any */
if(LIKELY(m->queue == NULL))
{
/* no threads waiting - open the lock */
mutex_set_thread(m, NULL);
corelock_unlock(&m->cl);
return;
}
else
{
const int oldlevel = disable_irq_save();
/* Transfer of owning thread is handled in the wakeup protocol
* if priorities are enabled otherwise just set it from the
* queue head. */
IFN_PRIO( mutex_set_thread(m, m->queue); )
IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
restore_irq(oldlevel);
corelock_unlock(&m->cl);
#ifdef HAVE_PRIORITY_SCHEDULING
if((result & THREAD_SWITCH) && !m->no_preempt)
switch_thread();
#endif
}
}

View File

@ -18,51 +18,16 @@
* KIND, either express or implied.
*
****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"
#include "debug.h"
#include "queue.h"
#include "corelock.h"
#include "kernel-internal.h"
#include "general.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG and sim */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif
#if KERNEL_OBJECT_CHECKS
#ifdef SIMULATOR
#define KERNEL_ASSERT(exp, msg...) \
({ if (!({ exp; })) { DEBUGF(msg); exit(-1); } })
#else
#define KERNEL_ASSERT(exp, msg...) \
({ if (!({ exp; })) panicf(msg); })
#endif
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
#include "panic.h"
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct
@ -73,194 +38,6 @@ static struct
#endif
} all_queues SHAREDBSS_ATTR;
/****************************************************************************
* Standard kernel stuff
****************************************************************************/
void kernel_init(void)
{
/* Init the threading API */
init_threads();
/* Other processors will not reach this point in a multicore build.
* In a single-core build with multiple cores they fall-through and
* sleep in cop_main without returning. */
if (CURRENT_CORE == CPU)
{
memset(tick_funcs, 0, sizeof(tick_funcs));
memset(&all_queues, 0, sizeof(all_queues));
corelock_init(&all_queues.cl);
tick_start(1000/HZ);
#ifdef KDEV_INIT
kernel_device_init();
#endif
}
}
/****************************************************************************
* Timer tick - Timer initialization and interrupt handler is defined at
* the target level.
****************************************************************************/
int tick_add_task(void (*f)(void))
{
int oldlevel = disable_irq_save();
void **arr = (void **)tick_funcs;
void **p = find_array_ptr(arr, f);
/* Add a task if there is room */
if(p - arr < MAX_NUM_TICK_TASKS)
{
*p = f; /* If already in list, no problem. */
}
else
{
panicf("Error! tick_add_task(): out of tasks");
}
restore_irq(oldlevel);
return 0;
}
int tick_remove_task(void (*f)(void))
{
int oldlevel = disable_irq_save();
int rc = remove_array_ptr((void **)tick_funcs, f);
restore_irq(oldlevel);
return rc;
}
/****************************************************************************
* Tick-based interval timers/one-shots - be mindful this is not really
* intended for continuous timers but for events that need to run for a short
* time and be cancelled without further software intervention.
****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
/* timeout tick task - calls event handlers when they expire
* Event handlers may alter expiration, callback and data during operation.
*/
static void timeout_tick(void)
{
unsigned long tick = current_tick;
struct timeout **p = tmo_list;
struct timeout *curr;
for(curr = *p; curr != NULL; curr = *(++p))
{
int ticks;
if(TIME_BEFORE(tick, curr->expires))
continue;
/* this event has expired - call callback */
ticks = curr->callback(curr);
if(ticks > 0)
{
curr->expires = tick + ticks; /* reload */
}
else
{
timeout_cancel(curr); /* cancel */
}
}
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
int oldlevel = disable_irq_save();
int rc = remove_array_ptr((void **)tmo_list, tmo);
if(rc >= 0 && *tmo_list == NULL)
{
tick_remove_task(timeout_tick); /* Last one - remove task */
}
restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
int ticks, intptr_t data)
{
int oldlevel;
void **arr, **p;
if(tmo == NULL)
return;
oldlevel = disable_irq_save();
/* See if this one is already registered */
arr = (void **)tmo_list;
p = find_array_ptr(arr, tmo);
if(p - arr < MAX_NUM_TIMEOUTS)
{
/* Vacancy */
if(*p == NULL)
{
/* Not present */
if(*tmo_list == NULL)
{
tick_add_task(timeout_tick); /* First one - add task */
}
*p = tmo;
}
tmo->callback = callback;
tmo->data = data;
tmo->expires = current_tick + ticks;
}
restore_irq(oldlevel);
}
#endif /* INCLUDE_TIMEOUT_API */
/****************************************************************************
* Thread stuff
****************************************************************************/
/* Suspends a thread's execution for at least the specified number of ticks.
* May result in CPU core entering wait-for-interrupt mode if no other thread
* may be scheduled.
*
* NOTE: sleep(0) sleeps until the end of the current tick
* sleep(n) that doesn't result in rescheduling:
* n <= ticks suspended < n + 1
* n to n+1 is a lower bound. Other factors may affect the actual time
* a thread is suspended before it runs again.
*/
unsigned sleep(unsigned ticks)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (SLEEP_KERNEL_HOOK(ticks))
return 0; /* Handled */
disable_irq();
sleep_thread(ticks);
switch_thread();
return 0;
}
/* Elects another thread to run or, if no other thread may be made ready to
* run, immediately returns control back to the calling thread.
*/
void yield(void)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (YIELD_KERNEL_HOOK())
return; /* handled */
switch_thread();
}
/****************************************************************************
* Queue handling stuff
****************************************************************************/
@ -1003,237 +780,7 @@ int queue_broadcast(long id, intptr_t data)
return p - all_queues.queues;
}
/****************************************************************************
* Simple mutex functions ;)
****************************************************************************/
static inline void __attribute__((always_inline))
mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
void init_queues(void)
{
#ifdef HAVE_PRIORITY_SCHEDULING
mtx->blocker.thread = td;
#else
mtx->thread = td;
#endif
corelock_init(&all_queues.cl);
}
static inline struct thread_entry * __attribute__((always_inline))
mutex_get_thread(volatile struct mutex *mtx)
{
#ifdef HAVE_PRIORITY_SCHEDULING
return mtx->blocker.thread;
#else
return mtx->thread;
#endif
}
/* Initialize a mutex object - call before any use and do not call again once
* the object is available to other threads */
void mutex_init(struct mutex *m)
{
corelock_init(&m->cl);
m->queue = NULL;
m->recursion = 0;
mutex_set_thread(m, NULL);
#ifdef HAVE_PRIORITY_SCHEDULING
m->blocker.priority = PRIORITY_IDLE;
m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
m->no_preempt = false;
#endif
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
struct thread_entry *current = thread_self_entry();
if(current == mutex_get_thread(m))
{
/* current thread already owns this mutex */
m->recursion++;
return;
}
/* lock out other cores */
corelock_lock(&m->cl);
/* must read thread again inside cs (a multiprocessor concern really) */
if(LIKELY(mutex_get_thread(m) == NULL))
{
/* lock is open */
mutex_set_thread(m, current);
corelock_unlock(&m->cl);
return;
}
/* block until the lock is open... */
IF_COP( current->obj_cl = &m->cl; )
IF_PRIO( current->blocker = &m->blocker; )
current->bqp = &m->queue;
disable_irq();
block_thread(current);
corelock_unlock(&m->cl);
/* ...and turn control over to next thread */
switch_thread();
}
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
/* unlocker not being the owner is an unlocking violation */
KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
"mutex_unlock->wrong thread (%s != %s)\n",
mutex_get_thread(m)->name,
thread_self_entry()->name);
if(m->recursion > 0)
{
/* this thread still owns lock */
m->recursion--;
return;
}
/* lock out other cores */
corelock_lock(&m->cl);
/* transfer to next queued thread if any */
if(LIKELY(m->queue == NULL))
{
/* no threads waiting - open the lock */
mutex_set_thread(m, NULL);
corelock_unlock(&m->cl);
return;
}
else
{
const int oldlevel = disable_irq_save();
/* Transfer of owning thread is handled in the wakeup protocol
* if priorities are enabled otherwise just set it from the
* queue head. */
IFN_PRIO( mutex_set_thread(m, m->queue); )
IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
restore_irq(oldlevel);
corelock_unlock(&m->cl);
#ifdef HAVE_PRIORITY_SCHEDULING
if((result & THREAD_SWITCH) && !m->no_preempt)
switch_thread();
#endif
}
}
/****************************************************************************
* Simple semaphore functions ;)
****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
/* Initialize the semaphore object.
* max = maximum up count the semaphore may assume (max >= 1)
* start = initial count of semaphore (0 <= count <= max) */
void semaphore_init(struct semaphore *s, int max, int start)
{
KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
"semaphore_init->inv arg\n");
s->queue = NULL;
s->max = max;
s->count = start;
corelock_init(&s->cl);
}
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
* it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
* safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
int ret;
int oldlevel;
int count;
oldlevel = disable_irq_save();
corelock_lock(&s->cl);
count = s->count;
if(LIKELY(count > 0))
{
/* count is not zero; down it */
s->count = count - 1;
ret = OBJ_WAIT_SUCCEEDED;
}
else if(timeout == 0)
{
/* just polling it */
ret = OBJ_WAIT_TIMEDOUT;
}
else
{
/* too many waits - block until count is upped... */
struct thread_entry * current = thread_self_entry();
IF_COP( current->obj_cl = &s->cl; )
current->bqp = &s->queue;
/* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
* explicit in semaphore_release */
current->retval = OBJ_WAIT_TIMEDOUT;
if(timeout > 0)
block_thread_w_tmo(current, timeout); /* ...or timed out... */
else
block_thread(current); /* -timeout = infinite */
corelock_unlock(&s->cl);
/* ...and turn control over to next thread */
switch_thread();
return current->retval;
}
corelock_unlock(&s->cl);
restore_irq(oldlevel);
return ret;
}
/* Up the semaphore's count and release any thread waiting at the head of the
* queue. The count is saturated to the value of the 'max' parameter specified
* in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
unsigned int result = THREAD_NONE;
int oldlevel;
oldlevel = disable_irq_save();
corelock_lock(&s->cl);
if(LIKELY(s->queue != NULL))
{
/* a thread was queued - wake it up and keep count at 0 */
KERNEL_ASSERT(s->count == 0,
"semaphore_release->threads queued but count=%d!\n", s->count);
s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
result = wakeup_thread(&s->queue);
}
else
{
int count = s->count;
if(count < s->max)
{
/* nothing waiting - up it */
s->count = count + 1;
}
}
corelock_unlock(&s->cl);
restore_irq(oldlevel);
#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
/* No thread switch if not thread context */
if((result & THREAD_SWITCH) && is_thread_context())
switch_thread();
#endif
(void)result;
}
#endif /* HAVE_SEMAPHORE_OBJECTS */

142 firmware/kernel/semaphore.c Normal file
View File

@ -0,0 +1,142 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include <stdbool.h>
#include "config.h"
#include "kernel.h"
#include "semaphore.h"
#include "kernel-internal.h"
#include "thread-internal.h"
/****************************************************************************
* Simple semaphore functions ;)
****************************************************************************/
/* Initialize the semaphore object.
* max = maximum up count the semaphore may assume (max >= 1)
* start = initial count of semaphore (0 <= count <= max) */
void semaphore_init(struct semaphore *s, int max, int start)
{
KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
"semaphore_init->inv arg\n");
s->queue = NULL;
s->max = max;
s->count = start;
corelock_init(&s->cl);
}
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
* it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
* safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
int ret;
int oldlevel;
int count;
oldlevel = disable_irq_save();
corelock_lock(&s->cl);
count = s->count;
if(LIKELY(count > 0))
{
/* count is not zero; down it */
s->count = count - 1;
ret = OBJ_WAIT_SUCCEEDED;
}
else if(timeout == 0)
{
/* just polling it */
ret = OBJ_WAIT_TIMEDOUT;
}
else
{
/* too many waits - block until count is upped... */
struct thread_entry * current = thread_self_entry();
IF_COP( current->obj_cl = &s->cl; )
current->bqp = &s->queue;
/* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
* explicit in semaphore_release */
current->retval = OBJ_WAIT_TIMEDOUT;
if(timeout > 0)
block_thread_w_tmo(current, timeout); /* ...or timed out... */
else
block_thread(current); /* -timeout = infinite */
corelock_unlock(&s->cl);
/* ...and turn control over to next thread */
switch_thread();
return current->retval;
}
corelock_unlock(&s->cl);
restore_irq(oldlevel);
return ret;
}
/* Up the semaphore's count and release any thread waiting at the head of the
* queue. The count is saturated to the value of the 'max' parameter specified
* in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
unsigned int result = THREAD_NONE;
int oldlevel;
oldlevel = disable_irq_save();
corelock_lock(&s->cl);
if(LIKELY(s->queue != NULL))
{
/* a thread was queued - wake it up and keep count at 0 */
KERNEL_ASSERT(s->count == 0,
"semaphore_release->threads queued but count=%d!\n", s->count);
s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
result = wakeup_thread(&s->queue);
}
else
{
int count = s->count;
if(count < s->max)
{
/* nothing waiting - up it */
s->count = count + 1;
}
}
corelock_unlock(&s->cl);
restore_irq(oldlevel);
#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
/* No thread switch if not thread context */
if((result & THREAD_SWITCH) && is_thread_context())
switch_thread();
#endif
(void)result;
}

View File

@ -0,0 +1,357 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Ulf Ralberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef THREAD_H
#define THREAD_H
#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"
/*
* We need more stack when we run under a host
* maybe more expensive C lib functions?
*
* simulator (possibly) doesn't simulate stack usage anyway but well ... */
#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
struct regs
{
void *t; /* OS thread */
void *told; /* Last thread in slot (explained in thread-sdl.c) */
void *s; /* Semaphore for blocking and wakeup */
void (*start)(void); /* Start function */
};
#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS */
#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
volatile unsigned char myl[NUM_CORES];
volatile unsigned char turn;
} __attribute__((packed));
/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
/* NOTE: The use of the word "queue" may also refer to a linked list of
threads being maintained that are normally dealt with in FIFO order
and not necessarily kernel event_queue */
enum
{
/* States without a timeout must be first */
STATE_KILLED = 0, /* Thread is killed (default) */
STATE_RUNNING, /* Thread is currently running */
STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
/* These states involve adding the thread to the tmo list */
STATE_SLEEPING, /* Thread is sleeping with a timeout */
STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
/* Miscellaneous states */
STATE_FROZEN, /* Thread is suspended and will not run until
thread_thaw is called with its ID */
THREAD_NUM_STATES,
TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
* an alternate STATE_* won't work since that would interfere with operation
* while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif
/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
struct thread_entry *prev; /* Previous thread in a list */
struct thread_entry *next; /* Next thread in a list */
};
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
struct thread_entry * volatile thread; /* thread blocking other threads
(aka. object owner) */
int priority; /* highest priority waiter */
struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};
/* Choices of wakeup protocol */
/* For transfer of object ownership by one thread to another thread by
* the owning thread itself (mutexes) */
struct thread_entry *
wakeup_priority_protocol_transfer(struct thread_entry *thread);
/* For release by owner where ownership doesn't change - other threads,
* interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
wakeup_priority_protocol_release(struct thread_entry *thread);
struct priority_distribution
{
uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
uint32_t mask; /* Bitmask of hist entries that are not zero */
};
#endif /* HAVE_PRIORITY_SCHEDULING */
/* Information kept in each thread slot
* members are arranged according to size - largest first - in order
* to ensure both alignment and packing at the same time.
*/
struct thread_entry
{
struct regs context; /* Register context at switch -
_must_ be first member */
uintptr_t *stack; /* Pointer to top of stack */
const char *name; /* Thread name */
long tmo_tick; /* Tick when thread should be woken from
timeout -
states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
struct thread_list l; /* Links for blocked/waking/running -
circular linkage in both directions */
struct thread_list tmo; /* Links for timeout list -
Circular in reverse direction, NULL-terminated in
forward direction -
states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
struct thread_entry **bqp; /* Pointer to list variable in kernel
object where thread is blocked - used
for implicit unblock and explicit wake
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
struct corelock *obj_cl; /* Object corelock where thread is blocked -
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
struct corelock waiter_cl; /* Corelock for thread_wait */
struct corelock slot_cl; /* Corelock to lock thread slot */
unsigned char core; /* The core to which thread belongs */
#endif
struct thread_entry *queue; /* List of threads waiting for thread to be
removed */
#ifdef HAVE_WAKEUP_EXT_CB
void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
performs special steps needed when being
forced off of an object's wait queue that
go beyond the standard wait queue removal
and priority disinheritance */
/* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
NUM_CORES > 1
volatile intptr_t retval; /* Return value from a blocked operation/
misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
/* Priority summary of owned objects that support inheritance */
struct blocker *blocker; /* Pointer to blocker when this thread is blocked
on an object that supports PIP -
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
struct priority_distribution pdist; /* Priority summary of owned objects
that have blocked threads and thread's own
base priority */
int skip_count; /* Number of times skipped if higher priority
thread was running */
unsigned char base_priority; /* Base priority (set explicitly during
creation or thread_set_priority) */
unsigned char priority; /* Scheduled priority (higher of base or
all threads blocked by this one) */
#endif
uint16_t id; /* Current slot id */
unsigned short stack_size; /* Size of stack in bytes */
unsigned char state; /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
unsigned char cpu_boost; /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
unsigned char io_priority;
#endif
};
/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK 0xff00
#define THREAD_ID_SLOT_MASK 0x00ff
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
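/* Worked example: the first occupant of slot 5 gets
 * THREAD_ID_INIT(5) == (1u << 8) | 5 == 0x0105. The version byte exists so
 * that a stale ID kept across a slot reuse (which bumps the version, e.g. to
 * 0x0205) no longer matches and can be rejected. */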
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
enum
{
TBOP_CLEAR = 0, /* No operation to do */
TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
};
struct thread_blk_ops
{
struct corelock *cl_p; /* pointer to corelock */
unsigned char flags; /* TBOP_* flags */
};
#endif /* HAVE_CORELOCK_OBJECT */
/* Information kept for each core
* Members are arranged for the same reason as in thread_entry
*/
struct core_entry
{
/* "Active" lists - core is constantly active on these and are never
locked and interrupts do not access them */
struct thread_entry *running; /* threads that are running (RTR) */
struct thread_entry *timeout; /* threads that are on a timeout before
running again */
struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
struct priority_distribution rtr; /* Summary of running and ready-to-run
threads */
#endif
long next_tmo_check; /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
struct thread_blk_ops blk_ops; /* operations to perform when
blocking a thread */
struct corelock rtr_cl; /* Lock for rtr list */
#endif /* HAVE_CORELOCK_OBJECT */
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));
/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;
/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
void* stack, size_t stack_size,
unsigned flags, const char *name
IF_PRIO(, int priority)
IF_COP(, unsigned int core));
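/* Usage sketch (illustrative only; DEFAULT_STACK_SIZE, PRIORITY_BACKGROUND
 * and CPU are assumed to be the usual firmware-provided constants): */
#if 0
static long demo_stack[DEFAULT_STACK_SIZE / sizeof(long)];

static void demo_thread(void)
{
    /* ... thread body ... */
    thread_exit();
}

static void demo_start(void)
{
    /* Create frozen so shared state can be set up before it first runs */
    unsigned int id = create_thread(demo_thread, demo_stack,
                                    sizeof(demo_stack),
                                    CREATE_THREAD_FROZEN, "demo"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    thread_thaw(id);
}
#endif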
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread that is not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
* next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);
/* Return bit flags for thread wakeup */
#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
#define THREAD_OK 0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
higher priority than current were woken) */
/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);
/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
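/* Sketch (illustrative; "waiters" is a hypothetical list head): wake every
 * waiter and reschedule if a higher-priority thread was made ready. */
#if 0
static struct thread_entry *waiters = NULL;

static void example_wake_all(void)
{
    if (thread_queue_wake(&waiters) & THREAD_SWITCH)
        switch_thread();
}
#endif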
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
/* Return the id of the calling thread. */
unsigned int thread_self(void);
/* Return the thread_entry for the calling thread.
* INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);
/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
#endif /* THREAD_H */


@ -41,6 +41,7 @@
#endif
#include "core_alloc.h"
#include "gcc_extensions.h"
#include "corelock.h"
/****************************************************************************
* ATTENTION!! *
@ -2390,3 +2391,52 @@ void thread_get_name(char *buffer, int size,
snprintf(buffer, size, fmt, name);
}
}
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
/*---------------------------------------------------------------------------
* Suspends a thread's execution for at least the specified number of ticks.
*
 * May result in the CPU core entering wait-for-interrupt mode if no other
 * thread can be scheduled.
 *
 * NOTE: sleep(0) sleeps until the end of the current tick.
 *       For a sleep(n) that doesn't result in rescheduling:
 *                n <= ticks suspended < n + 1
 *       This range is only a lower bound; other factors may lengthen the
 *       actual time before the thread runs again.
*---------------------------------------------------------------------------
*/
unsigned sleep(unsigned ticks)
{
/* In some situations, certain bootloaders in particular, a normal
 * threading call is inappropriate. */
if (SLEEP_KERNEL_HOOK(ticks))
return 0; /* Handled */
disable_irq();
sleep_thread(ticks);
switch_thread();
return 0;
}
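/* Usage sketch (illustrative): with HZ ticks per second, these calls behave
 * as described in the NOTE above. */
#if 0
sleep(HZ / 2); /* suspend for at least half a second */
sleep(0);      /* just wait out the remainder of the current tick */
#endif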
/*---------------------------------------------------------------------------
* Elects another thread to run or, if no other thread may be made ready to
* run, immediately returns control back to the calling thread.
*---------------------------------------------------------------------------
*/
void yield(void)
{
/* In some situations, certain bootloaders in particular, a normal
 * threading call is inappropriate. */
if (YIELD_KERNEL_HOOK())
return; /* handled */
switch_thread();
}

firmware/kernel/tick.c (new file)

@ -0,0 +1,74 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Björn Stenberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "tick.h"
#include "general.h"
#include "panic.h"
/****************************************************************************
* Timer tick
*****************************************************************************/
/* List of tick tasks - final element always NULL for termination */
void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
#if !defined(CPU_PP) || !defined(BOOTLOADER) || \
defined(HAVE_BOOTLOADER_USB_MODE)
volatile long current_tick SHAREDDATA_ATTR = 0;
#endif
/* Timer initialization and the interrupt handler are defined at
 * the target level; tick_start() is implemented in the target tree */
int tick_add_task(void (*f)(void))
{
int oldlevel = disable_irq_save();
void **arr = (void **)tick_funcs;
void **p = find_array_ptr(arr, f);
/* Add a task if there is room */
if(p - arr < MAX_NUM_TICK_TASKS)
{
*p = f; /* If already in list, no problem. */
}
else
{
panicf("Error! tick_add_task(): out of tasks");
}
restore_irq(oldlevel);
return 0;
}
int tick_remove_task(void (*f)(void))
{
int oldlevel = disable_irq_save();
int rc = remove_array_ptr((void **)tick_funcs, f);
restore_irq(oldlevel);
return rc;
}
void init_tick(void)
{
tick_start(1000/HZ);
}
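/* Usage sketch (illustrative; the names are hypothetical). Tick tasks are
 * called from the tick interrupt once per tick, so they must be short and
 * must never block. */
#if 0
static void counter_tick_task(void)
{
    /* runs once per tick in interrupt context */
}

static void counter_start(void)
{
    tick_add_task(counter_tick_task);
}

static void counter_stop(void)
{
    tick_remove_task(counter_tick_task);
}
#endif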

firmware/kernel/timeout.c (new file)

@ -0,0 +1,97 @@
/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful these are not really
 * intended for continuous timers but for events that need to run for a short
 * time and then be cancelled without further software intervention.
****************************************************************************/
#include "config.h"
#include "system.h" /* TIME_AFTER */
#include "kernel.h"
#include "timeout.h"
#include "general.h"
/* list of active timeout events */
static struct timeout *tmo_list[MAX_NUM_TIMEOUTS+1];
/* timeout tick task - calls event handlers when their timeouts expire.
 * Event handlers may alter the expiration, callback and data during
 * operation. */
static void timeout_tick(void)
{
unsigned long tick = current_tick;
struct timeout **p = tmo_list;
struct timeout *curr;
for(curr = *p; curr != NULL; curr = *(++p))
{
int ticks;
if(TIME_BEFORE(tick, curr->expires))
continue;
/* this event has expired - call callback */
ticks = curr->callback(curr);
if(ticks > 0)
{
curr->expires = tick + ticks; /* reload */
}
else
{
timeout_cancel(curr); /* cancel */
}
}
}
/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
int oldlevel = disable_irq_save();
int rc = remove_array_ptr((void **)tmo_list, tmo);
if(rc >= 0 && *tmo_list == NULL)
{
tick_remove_task(timeout_tick); /* Last one - remove task */
}
restore_irq(oldlevel);
}
/* Adds a timeout callback - calling with an active timeout resets the
interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
int ticks, intptr_t data)
{
int oldlevel;
void **arr, **p;
if(tmo == NULL)
return;
oldlevel = disable_irq_save();
/* See if this one is already registered */
arr = (void **)tmo_list;
p = find_array_ptr(arr, tmo);
if(p - arr < MAX_NUM_TIMEOUTS)
{
/* Vacancy */
if(*p == NULL)
{
/* Not present */
if(*tmo_list == NULL)
{
tick_add_task(timeout_tick); /* First one - add task */
}
*p = tmo;
}
tmo->callback = callback;
tmo->data = data;
tmo->expires = current_tick + ticks;
}
restore_irq(oldlevel);
}
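/* Usage sketch (illustrative; the names are hypothetical). The callback
 * receives its struct timeout and returns the number of ticks until the
 * next firing, or <= 0 to stop, matching what timeout_tick() expects. */
#if 0
static struct timeout debounce_tmo;

static int debounce_cb(struct timeout *tmo)
{
    (void)tmo;  /* expiration, callback and data could be altered here */
    return 0;   /* one-shot: returning <= 0 cancels the timeout */
}

static void debounce_arm(void)
{
    /* fire once roughly 100 ms from now, given HZ ticks per second */
    timeout_register(&debounce_tmo, debounce_cb, HZ/10, 0);
}
#endif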


@ -19,9 +19,10 @@
*
****************************************************************************/
#include <stdbool.h>
#include "config.h"
#include "system.h"
#include <stdbool.h>
#include "kernel.h"
#include "font.h"
#include "lcd.h"
#include "button.h"


@ -51,128 +51,6 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
[COP] = cop_idlestackbegin
};
/* Core locks using Peterson's mutual exclusion algorithm */
/*---------------------------------------------------------------------------
* Initialize the corelock structure.
*---------------------------------------------------------------------------
*/
void corelock_init(struct corelock *cl)
{
memset(cl, 0, sizeof (*cl));
}
#if 1 /* Assembly locks to minimize overhead */
/*---------------------------------------------------------------------------
* Wait for the corelock to become free and acquire it when it does.
*---------------------------------------------------------------------------
*/
void __attribute__((naked)) corelock_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
"strb r2, [r0, #2] \n" /* cl->turn = othercore */
"1: \n"
"ldrb r3, [r0, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
"cmp r3, #0 \n" /* yes? lock acquired */
"bxeq lr \n"
"ldrb r3, [r0, #2] \n" /* || cl->turn == core ? */
"cmp r3, r1 \n"
"bxeq lr \n" /* yes? lock acquired */
"b 1b \n" /* keep trying */
: : "i"(&PROCESSOR_ID)
);
(void)cl;
}
/*---------------------------------------------------------------------------
 * Try to acquire the corelock. If free, the caller gets it; otherwise 0 is returned.
*---------------------------------------------------------------------------
*/
int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"mov r3, r0 \n"
"strb r1, [r0, r1, lsr #7] \n" /* cl->myl[core] = core */
"eor r2, r1, #0xff \n" /* r2 = othercore */
"strb r2, [r0, #2] \n" /* cl->turn = othercore */
"ldrb r0, [r3, r2, lsr #7] \n" /* cl->myl[othercore] == 0 ? */
"eors r0, r0, r2 \n" /* yes? lock acquired */
"bxne lr \n"
"ldrb r0, [r3, #2] \n" /* || cl->turn == core? */
"ands r0, r0, r1 \n"
"streqb r0, [r3, r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
"bx lr \n" /* return result */
: : "i"(&PROCESSOR_ID)
);
return 0;
(void)cl;
}
/*---------------------------------------------------------------------------
* Release ownership of the corelock
*---------------------------------------------------------------------------
*/
void __attribute__((naked)) corelock_unlock(struct corelock *cl)
{
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */
"ldrb r1, [r1] \n"
"mov r2, #0 \n" /* cl->myl[core] = 0 */
"strb r2, [r0, r1, lsr #7] \n"
"bx lr \n"
: : "i"(&PROCESSOR_ID)
);
(void)cl;
}
#else /* C versions for reference */
void corelock_lock(struct corelock *cl)
{
const unsigned int core = CURRENT_CORE;
const unsigned int othercore = 1 - core;
cl->myl[core] = core;
cl->turn = othercore;
for (;;)
{
if (cl->myl[othercore] == 0 || cl->turn == core)
break;
}
}
int corelock_try_lock(struct corelock *cl)
{
const unsigned int core = CURRENT_CORE;
const unsigned int othercore = 1 - core;
cl->myl[core] = core;
cl->turn = othercore;
if (cl->myl[othercore] == 0 || cl->turn == core)
{
return 1;
}
cl->myl[core] = 0;
return 0;
}
void corelock_unlock(struct corelock *cl)
{
cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */
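/* Usage sketch (illustrative; "shared_cl" is hypothetical): a corelock
 * guards state touched by both cores. corelock_init() must run once before
 * either core takes the lock. */
#if 0
static struct corelock shared_cl;

static void shared_update(void)
{
    corelock_lock(&shared_cl);
    /* modify CPU/COP-shared state here */
    corelock_unlock(&shared_cl);
}
#endif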
/*---------------------------------------------------------------------------
* Do any device-specific inits for the threads and synchronize the kernel


@ -22,6 +22,7 @@
/* Taken from button-h10.c by Barry Wardell and reverse engineering by MrH. */
#include "system.h"
#include "kernel.h"
#include "button.h"
#include "backlight.h"
#include "powermgmt.h"


@ -22,8 +22,10 @@
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "system.h"
#include "kernel.h"
#include "lcd.h"
#include "lcd-target.h"


@ -682,3 +682,53 @@ void thread_get_name(char *buffer, int size,
snprintf(buffer, size, fmt, name);
}
}
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
/*---------------------------------------------------------------------------
* Suspends a thread's execution for at least the specified number of ticks.
*
 * May result in the CPU core entering wait-for-interrupt mode if no other
 * thread can be scheduled.
 *
 * NOTE: sleep(0) sleeps until the end of the current tick.
 *       For a sleep(n) that doesn't result in rescheduling:
 *                n <= ticks suspended < n + 1
 *       This range is only a lower bound; other factors may lengthen the
 *       actual time before the thread runs again.
*---------------------------------------------------------------------------
*/
unsigned sleep(unsigned ticks)
{
/* In some situations, certain bootloaders in particular, a normal
 * threading call is inappropriate. */
if (SLEEP_KERNEL_HOOK(ticks))
return 0; /* Handled */
disable_irq();
sleep_thread(ticks);
switch_thread();
return 0;
}
/*---------------------------------------------------------------------------
* Elects another thread to run or, if no other thread may be made ready to
* run, immediately returns control back to the calling thread.
*---------------------------------------------------------------------------
*/
void yield(void)
{
/* In some situations, certain bootloaders in particular, a normal
 * threading call is inappropriate. */
if (YIELD_KERNEL_HOOK())
return; /* handled */
switch_thread();
}