Do some kernel cleanup

* Seal away private thread and kernel definitions and declarations
into the internal headers in order to better hide internal structure.

* Add a thread-common.c file that keeps shared functions together. The
list functions are left alone for now since they are about to be
replaced with different ones.

* Some ARM/PP code has to be modified because GCC was complaining
about constant pool distance and I would rather not force a pool dump.
Just bl the cache calls in the startup and exit code and let the linker
use veneers if it must.

* Clean up redundant #includes in relevant areas and reorganize them.

* Expunge useless and dangerous stuff like remove_thread().

Change-Id: I6e22932fad61a9fac30fd1363c071074ee7ab382
Michael Sevakis 2014-08-08 01:39:29 -04:00
parent 53d9f2e6a7
commit 981d028c09
22 changed files with 366 additions and 986 deletions

View File

@@ -133,69 +133,44 @@
#include "talk.h"
/*---------------------------------------------------*/
/* SPECIAL DEBUG STUFF */
/*---------------------------------------------------*/
extern struct thread_entry threads[MAXTHREADS];
static char thread_status_char(unsigned status)
{
static const char thread_status_chars[THREAD_NUM_STATES+1] =
{
[0 ... THREAD_NUM_STATES] = '?',
[STATE_RUNNING] = 'R',
[STATE_BLOCKED] = 'B',
[STATE_SLEEPING] = 'S',
[STATE_BLOCKED_W_TMO] = 'T',
[STATE_FROZEN] = 'F',
[STATE_KILLED] = 'K',
};
if (status > THREAD_NUM_STATES)
status = THREAD_NUM_STATES;
return thread_status_chars[status];
}
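The [0 ... THREAD_NUM_STATES] = '?' line above uses GCC's designated-range initializer extension: it fills every slot with a default, and the later per-state designators override individual entries. A minimal sketch of the behavior (illustrative only, not code from this commit):

    static const char demo[5] = { [0 ... 4] = '?', [2] = 'R' };
    /* demo == { '?', '?', 'R', '?', '?' } */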
static const char* threads_getname(int selected_item, void *data,
char *buffer, size_t buffer_len)
{
(void)data;
struct thread_entry *thread;
char name[32];
#if NUM_CORES > 1
if (selected_item < (int)NUM_CORES)
{
struct core_debug_info coreinfo;
core_get_debug_info(selected_item, &coreinfo);
snprintf(buffer, buffer_len, "Idle (%d): %2d%%", selected_item,
idle_stack_usage(selected_item));
coreinfo.idle_stack_usage);
return buffer;
}
selected_item -= NUM_CORES;
#endif
thread = &threads[selected_item];
if (thread->state == STATE_KILLED)
struct thread_debug_info threadinfo;
if (thread_get_debug_info(selected_item, &threadinfo) <= 0)
{
snprintf(buffer, buffer_len, "%2d: ---", selected_item);
return buffer;
}
thread_get_name(name, 32, thread);
snprintf(buffer, buffer_len,
"%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d %d ") "%2d%% %s",
"%2d: " IF_COP("(%d) ") "%s " IF_PRIO("%d %d ") "%2d%% %s",
selected_item,
IF_COP(thread->core,)
#ifdef HAVE_SCHEDULER_BOOSTCTRL
(thread->cpu_boost) ? '+' :
#if NUM_CORES > 1
threadinfo.core,
#endif
((thread->state == STATE_RUNNING) ? '*' : ' '),
thread_status_char(thread->state),
IF_PRIO(thread->base_priority, thread->priority, )
thread_stack_usage(thread), name);
threadinfo.statusstr,
#ifdef HAVE_PRIORITY_SCHEDULING
threadinfo.base_priority,
threadinfo.current_priority,
#endif
threadinfo.stack_usage,
threadinfo.name);
return buffer;
}
@@ -203,19 +178,6 @@ static const char* threads_getname(int selected_item, void *data,
static int dbg_threads_action_callback(int action, struct gui_synclist *lists)
{
(void)lists;
#ifdef ROCKBOX_HAS_LOGF
if (action == ACTION_STD_OK)
{
int selpos = gui_synclist_get_sel_pos(lists);
#if NUM_CORES > 1
if (selpos >= NUM_CORES)
remove_thread(threads[selpos - NUM_CORES].id);
#else
remove_thread(threads[selpos].id);
#endif
return ACTION_REDRAW;
}
#endif /* ROCKBOX_HAS_LOGF */
if (action == ACTION_NONE)
action = ACTION_REDRAW;
return action;

View File

@@ -28,7 +28,7 @@
#include "rtc.h"
#include "debug.h"
#include "led.h"
#include "kernel.h"
#include "../kernel-internal.h"
#include "button.h"
#include "tree.h"
#include "filetypes.h"
@@ -44,7 +44,6 @@
#endif
#include "audio.h"
#include "mp3_playback.h"
#include "thread.h"
#include "settings.h"
#include "backlight.h"
#include "status.h"

View File

@@ -1838,6 +1838,7 @@ target/hosted/sdl/thread-sdl.c
#else
kernel/thread.c
#endif
kernel/thread-common.c
kernel/tick.c
#ifdef INCLUDE_TIMEOUT_API
kernel/timeout.c

View File

@@ -34,9 +34,7 @@ static void __attribute__((naked)) USED_ATTR start_thread(void)
"mov r1, #0 \n" /* Mark thread as running */
"str r1, [r0, #40] \n"
#if NUM_CORES > 1
"ldr r0, =commit_discard_idcache \n" /* Invalidate this core's cache. */
"mov lr, pc \n" /* This could be the first entry into */
"bx r0 \n" /* plugin or codec code for this core. */
"bl commit_discard_idcache \n" /* Invalidate this core's cache. */
#endif
"mov lr, pc \n" /* Call thread function */
"bx r4 \n"

View File

@@ -28,10 +28,14 @@
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#define corelock_init(cl) \
do {} while (0)
#define corelock_lock(cl) \
do {} while (0)
#define corelock_try_lock(cl) \
do {} while (0)
#define corelock_unlock(cl) \
do {} while (0)
#else
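Changing the empty stubs to do {} while (0) is the standard trick for making a no-op macro behave as exactly one statement that demands a trailing semicolon, so call sites parse identically whether or not HAVE_CORELOCK_OBJECT is defined. A sketch of the hazard the old form allowed (hypothetical names, not code from this commit):

    #define corelock_lock_old(cl)                 /* expands to nothing */
    #define corelock_lock_new(cl) do {} while (0) /* one full statement */

    corelock_lock_old(cl)   /* forgotten ";" compiles silently here...  */
    do_work();
    corelock_lock_new(cl)   /* ...but is a compile error here, on every */
    do_work();              /* build, as it should be                   */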

View File

@@ -48,23 +48,4 @@
#define TIMEOUT_BLOCK -1
#define TIMEOUT_NOBLOCK 0
static inline void kernel_init(void)
{
/* Init the threading API */
init_threads();
/* Other processors will not reach this point in a multicore build.
* In a single-core build with multiple cores they fall-through and
* sleep in cop_main without returning. */
if (CURRENT_CORE == CPU)
{
init_queues();
init_tick();
#ifdef KDEV_INIT
kernel_device_init();
#endif
}
}
#endif /* KERNEL_H */

View File

@@ -21,6 +21,8 @@
#ifndef MRSW_LOCK_H
#define MRSW_LOCK_H
#include "thread.h"
/* Multi-reader, single-writer object that allows multiple readers or a
* single writer thread access to a critical section.
*

View File

@@ -22,8 +22,6 @@
#ifndef MUTEX_H
#define MUTEX_H
#include <stdbool.h>
#include "config.h"
#include "thread.h"
struct mutex

View File

@@ -22,7 +22,6 @@
#ifndef SEMAPHORE_H
#define SEMAPHORE_H
#include "config.h"
#include "thread.h"
struct semaphore

View File

@@ -18,17 +18,16 @@
* KIND, either express or implied.
*
****************************************************************************/
#ifndef THREAD_H
#define THREAD_H
#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "config.h"
#include "gcc_extensions.h"
#include "corelock.h"
#include "bitarray.h"
#include "corelock.h"
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
* by giving high priority threads more CPU time than lower priority threads
@@ -65,7 +64,6 @@
#define IO_PRIORITY_IMMEDIATE 0
#define IO_PRIORITY_BACKGROUND 32
#if CONFIG_CODEC == SWCODEC
# ifdef HAVE_HARDWARE_CLICK
# define BASETHREADS 17
@@ -85,6 +83,8 @@
BITARRAY_TYPE_DECLARE(threadbit_t, threadbit, MAXTHREADS)
BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)
struct thread_entry;
/*
* We need more stack when we run under a host
* maybe more expensive C lib functions?
@@ -92,53 +92,22 @@ BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)
* simulator (possibly) doesn't simulate stack usage anyway but well ... */
#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__)
struct regs
{
void *t; /* OS thread */
void *told; /* Last thread in slot (explained in thread-sdl.c) */
void *s; /* Semaphore for blocking and wakeup */
void (*start)(void); /* Start function */
};
#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS */
/* NOTE: The use of the word "queue" may also refer to a linked list of
threads being maintained that are normally dealt with in FIFO order
and not necessarily kernel event_queue */
enum
{
/* States without a timeout must be first */
STATE_KILLED = 0, /* Thread is killed (default) */
STATE_RUNNING, /* Thread is currently running */
STATE_BLOCKED, /* Thread is indefinitely blocked on a queue */
/* These states involve adding the thread to the tmo list */
STATE_SLEEPING, /* Thread is sleeping with a timeout */
STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
/* Miscellaneous states */
STATE_FROZEN, /* Thread is suspended and will not run until
thread_thaw is called with its ID */
THREAD_NUM_STATES,
TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
extern void yield(void);
extern unsigned sleep(unsigned ticks);
#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
* an alternate STATE_* won't work since that would interfere with operation
* while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
struct thread_entry *prev; /* Previous thread in a list */
struct thread_entry *next; /* Next thread in a list */
};
/* Basic structure describing the owner of an object */
struct blocker
{
@@ -163,157 +132,9 @@ struct blocker_splay
#endif /* HAVE_PRIORITY_SCHEDULING */
};
#ifdef HAVE_PRIORITY_SCHEDULING
/* Quick-disinherit of priority elevation. Must be a running thread. */
void priority_disinherit(struct thread_entry *thread, struct blocker *bl);
struct priority_distribution
{
uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
priobit_t mask; /* Bitmask of hist entries that are not zero */
};
#endif /* HAVE_PRIORITY_SCHEDULING */
/* Information kept in each thread slot
* members are arranged according to size - largest first - in order
* to ensure both alignment and packing at the same time.
*/
struct thread_entry
{
struct regs context; /* Register context at switch -
_must_ be first member */
uintptr_t *stack; /* Pointer to top of stack */
const char *name; /* Thread name */
long tmo_tick; /* Tick when thread should be woken from
timeout -
states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
struct thread_list l; /* Links for blocked/waking/running -
circular linkage in both directions */
struct thread_list tmo; /* Links for timeout list -
Circular in reverse direction, NULL-terminated in
forward direction -
states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
struct thread_entry **bqp; /* Pointer to list variable in kernel
object where thread is blocked - used
for implicit unblock and explicit wake
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
struct corelock *obj_cl; /* Object corelock where thread is blocked -
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
struct corelock waiter_cl; /* Corelock for thread_wait */
struct corelock slot_cl; /* Corelock to lock thread slot */
unsigned char core; /* The core to which thread belongs */
#endif
struct thread_entry *queue; /* List of threads waiting for thread to be
removed */
#ifdef HAVE_WAKEUP_EXT_CB
void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
performs special steps needed when being
forced off of an object's wait queue that
go beyond the standard wait queue removal
and priority disinheritance */
/* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
NUM_CORES > 1
volatile intptr_t retval; /* Return value from a blocked operation/
misc. use */
#endif
uint32_t id; /* Current slot id */
int __errno; /* Thread error number (errno tls) */
#ifdef HAVE_PRIORITY_SCHEDULING
/* Priority summary of owned objects that support inheritance */
struct blocker *blocker; /* Pointer to blocker when this thread is blocked
on an object that supports PIP -
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
struct priority_distribution pdist; /* Priority summary of owned objects
that have blocked threads and thread's own
base priority */
int skip_count; /* Number of times skipped if higher priority
thread was running */
unsigned char base_priority; /* Base priority (set explicitly during
creation or thread_set_priority) */
unsigned char priority; /* Scheduled priority (higher of base or
all threads blocked by this one) */
#endif
unsigned short stack_size; /* Size of stack in bytes */
unsigned char state; /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
unsigned char cpu_boost; /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
unsigned char io_priority;
#endif
};
/*** Macros for internal use ***/
/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK 0xffffff00
#define THREAD_ID_SLOT_MASK 0x000000ff
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
#define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK)
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
enum
{
TBOP_CLEAR = 0, /* No operation to do */
TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
};
struct thread_blk_ops
{
struct corelock *cl_p; /* pointer to corelock */
unsigned char flags; /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Information kept for each core
* Members are arranged for the same reason as in thread_entry
*/
struct core_entry
{
/* "Active" lists - core is constantly active on these and are never
locked and interrupts do not access them */
struct thread_entry *running; /* threads that are running (RTR) */
struct thread_entry *timeout; /* threads that are on a timeout before
running again */
struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
struct priority_distribution rtr; /* Summary of running and ready-to-run
threads */
#endif
long next_tmo_check; /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
struct thread_blk_ops blk_ops; /* operations to perform when
blocking a thread */
struct corelock rtr_cl; /* Lock for rtr list */
#endif /* NUM_CORES */
};
extern void yield(void);
extern unsigned sleep(unsigned ticks);
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));
/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;
/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
@@ -330,59 +151,17 @@ void cancel_cpu_boost(void);
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozed thread runnable (when started with CREATE_THREAD_FROZEN).
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
* Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
* next tick) */
void sleep_thread(int ticks);
/* Blocks the current thread on a thread queue (< 0 == infinite) */
void block_thread(struct thread_entry *current, int timeout);
/* Return bit flags for thread wakeup */
#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
#define THREAD_OK 0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
higher priority than current were woken) */
/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);
/* Wakeup a thread at the head of a list */
enum wakeup_thread_protocol
{
WAKEUP_DEFAULT,
WAKEUP_TRANSFER,
WAKEUP_RELEASE,
WAKEUP_TRANSFER_MULTI,
};
unsigned int wakeup_thread_(struct thread_entry **list
IF_PRIO(, enum wakeup_thread_protocol proto));
#ifdef HAVE_PRIORITY_SCHEDULING
#define wakeup_thread(list, proto) \
wakeup_thread_((list), (proto))
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#else /* !HAVE_PRIORITY_SCHEDULING */
#define wakeup_thread(list, proto...) \
wakeup_thread_((list));
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
@@ -396,19 +175,31 @@ unsigned int switch_core(unsigned int new_core);
/* Return the id of the calling thread. */
unsigned int thread_self(void);
/* Return the thread_entry for the calling thread.
* INTERNAL: Intended for use by kernel and not for programs. */
struct thread_entry* thread_self_entry(void);
/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
struct core_debug_info
{
unsigned int idle_stack_usage;
};
int core_get_debug_info(unsigned int core, struct core_debug_info *infop);
#endif /* NUM_CORES */
struct thread_debug_info
{
char statusstr[4];
char name[32];
unsigned int stack_usage;
#if NUM_CORES > 1
unsigned int core;
#endif
void thread_get_name(char *buffer, int size,
struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#ifdef HAVE_PRIORITY_SCHEDULING
int base_priority;
int current_priority;
#endif
};
int thread_get_debug_info(unsigned int thread_id,
struct thread_debug_info *infop);
#endif /* THREAD_H */
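With the struct internals sealed away, the public debugging surface is now just these plain-data structs and the two *_get_debug_info() calls. A hedged usage sketch based on the return convention implemented in thread-common.c later in this diff (1 = live thread, 0 = killed/empty slot, negative = bad argument); the loop and printf are illustrative, not from the commit:

    struct thread_debug_info info;
    for (unsigned int i = 0; i < MAXTHREADS; i++)
    {
        if (thread_get_debug_info(i, &info) > 0)
            printf("%s %s stack:%u%%\n", info.statusstr, info.name,
                   info.stack_usage);
    }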

View File

@@ -22,8 +22,8 @@
#ifndef KERNEL_INTERNAL_H
#define KERNEL_INTERNAL_H
#include "config.h"
#include "debug.h"
#include "thread-internal.h"
#include "kernel.h"
/* Make this nonzero to enable more elaborate checks on objects */
#if defined(DEBUG) || defined(SIMULATOR)
@@ -45,5 +45,23 @@
#define KERNEL_ASSERT(exp, msg...) ({})
#endif
static inline void kernel_init(void)
{
/* Init the threading API */
extern void init_threads(void);
init_threads();
/* Other processors will not reach this point in a multicore build.
* In a single-core build with multiple cores they fall-through and
* sleep in cop_main without returning. */
if (CURRENT_CORE == CPU)
{
init_queues();
init_tick();
#ifdef KDEV_INIT
kernel_device_init();
#endif
}
}
#endif /* KERNEL_INTERNAL_H */
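kernel_init() thus drops out of the public kernel.h (see the hunk above that removed it) and lives here, with init_threads() declared extern inside the body so it never appears in a public header at all. Only startup code should include this header; a sketch of the intended call site (the surrounding main() and system_init() ordering are assumptions for illustration, not part of this commit):

    #include "kernel-internal.h"  /* startup code only - not kernel.h */

    void main(void)
    {
        system_init();  /* assumed platform bring-up */
        kernel_init();  /* threads first; queues/tick/devices on CPU core */
        /* ... */
    }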

View File

@@ -18,12 +18,8 @@
* KIND, either express or implied.
*
****************************************************************************/
#include <string.h>
#include "config.h"
#include "system.h"
#include "thread.h"
#include "kernel.h"
#include "kernel-internal.h"
#include "mrsw-lock.h"
#ifdef HAVE_PRIORITY_SCHEDULING
@@ -45,9 +41,7 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
Therefore, if the queue has threads, then the next after the
owning readers is a writer and this is not the last reader. */
if (mrsw->queue)
{
corelock_lock(&mrsw->splay.cl);
}
threadbit_clear_bit(&mrsw->splay.mask, slotnum);

View File

@@ -23,13 +23,8 @@
/****************************************************************************
* Simple mutex functions ;)
****************************************************************************/
#include <stdbool.h>
#include "config.h"
#include "system.h"
#include "kernel.h"
#include "thread-internal.h"
#include "kernel-internal.h"
#include "mutex.h"
/* Initialize a mutex object - call before any use and do not call again once
* the object is available to other threads */

View File

@@ -194,23 +194,6 @@ static void remove_from_list_l(struct thread_entry **list,
thread->l.next->l.prev = thread->l.prev;
}
unsigned int thread_queue_wake(struct thread_entry **list)
{
unsigned int result = THREAD_NONE;
for (;;)
{
unsigned int rc = wakeup_thread(list);
if (rc == THREAD_NONE)
break;
result |= rc;
}
return result;
}
/* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point
* to a corelock instance, and this corelock must be held by the caller */
void block_thread_switch(struct thread_entry *t, struct corelock *cl)

View File

@@ -18,16 +18,10 @@
* KIND, either express or implied.
*
****************************************************************************/
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "system.h"
#include "queue.h"
#include "corelock.h"
#include "kernel-internal.h"
#include "queue.h"
#include "general.h"
#include "panic.h"
/* This array holds all queues that are initiated. It is used for broadcast. */
static struct

View File

@@ -18,18 +18,8 @@
* KIND, either express or implied.
*
****************************************************************************/
/****************************************************************************
* Simple mutex functions ;)
****************************************************************************/
#include <stdbool.h>
#include "config.h"
#include "kernel.h"
#include "semaphore.h"
#include "kernel-internal.h"
#include "thread-internal.h"
#include "semaphore.h"
/****************************************************************************
* Simple semaphore functions ;)

View File

@@ -0,0 +1,152 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Ulf Ralberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "thread-internal.h"
#include "system.h"
/*---------------------------------------------------------------------------
* Wakeup an entire queue of threads - returns bitwise-or of return bitmask
* from each operation or THREAD_NONE if nothing was awakened. Object owning
* the queue must be locked first.
*
* INTERNAL: Intended for use by kernel objects and not for programs.
*---------------------------------------------------------------------------
*/
unsigned int thread_queue_wake(struct thread_entry **list)
{
unsigned result = THREAD_NONE;
for (;;)
{
unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
if (rc == THREAD_NONE)
break; /* No more threads */
result |= rc;
}
return result;
}
/** Debug screen stuff **/
/*---------------------------------------------------------------------------
* returns the stack space used in bytes
*---------------------------------------------------------------------------
*/
static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
{
unsigned int usage = 0;
unsigned int stack_words = stack_size / sizeof (uintptr_t);
for (unsigned int i = 0; i < stack_words; i++)
{
if (stackptr[i] != DEADBEEF)
{
usage = (stack_words - i) * 100 / stack_words;
break;
}
}
return usage;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the core's idle stack ever used during
* runtime.
*---------------------------------------------------------------------------
*/
int core_get_debug_info(unsigned int core, struct core_debug_info *infop)
{
extern uintptr_t * const idle_stacks[NUM_CORES];
if (core >= NUM_CORES || !infop)
return -1;
infop->idle_stack_usage = stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
return 1;
}
#endif /* NUM_CORES > 1 */
int thread_get_debug_info(unsigned int thread_id,
struct thread_debug_info *infop)
{
static const char status_chars[THREAD_NUM_STATES+1] =
{
[0 ... THREAD_NUM_STATES] = '?',
[STATE_RUNNING] = 'R',
[STATE_BLOCKED] = 'B',
[STATE_SLEEPING] = 'S',
[STATE_BLOCKED_W_TMO] = 'T',
[STATE_FROZEN] = 'F',
[STATE_KILLED] = 'K',
};
if (!infop)
return -1;
unsigned int slot = THREAD_ID_SLOT(thread_id);
if (slot >= MAXTHREADS)
return -1;
extern struct thread_entry threads[MAXTHREADS];
struct thread_entry *thread = &threads[slot];
int oldlevel = disable_irq_save();
LOCK_THREAD(thread);
unsigned int state = thread->state;
if (state != STATE_KILLED)
{
const char *name = thread->name;
if (!name)
name = "";
bool cpu_boost = false;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
cpu_boost = thread->cpu_boost;
#endif
infop->stack_usage = stack_usage(thread->stack, thread->stack_size);
#if NUM_CORES > 1
infop->core = thread->core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
infop->base_priority = thread->base_priority;
infop->current_priority = thread->priority;
#endif
snprintf(infop->statusstr, sizeof (infop->statusstr), "%c%c",
cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '),
status_chars[state]);
const char *fmt = *name ? "%s" : "%s%08lX";
snprintf(infop->name, sizeof (infop->name), fmt, name,
thread->id);
}
UNLOCK_THREAD(thread);
restore_irq(oldlevel);
return state == STATE_KILLED ? 0 : 1;
}
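The stack_usage() scan above works because thread stacks are poisoned with the DEADBEEF pattern at creation, so the first word (scanning upward from the deepest address) that no longer holds the pattern marks the high-water mark. The create side of that contract, copied from the create_thread() hunk in thread-sdl.c later in this diff:

    unsigned int stack_words = stack_size / sizeof (uintptr_t);
    for (unsigned int i = stack_words; i-- > 0;)
        ((uintptr_t *)stack)[i] = DEADBEEF;   /* poison the whole stack */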

View File

@@ -18,15 +18,13 @@
* KIND, either express or implied.
*
****************************************************************************/
#ifndef THREAD_INTERNAL_H
#define THREAD_INTERNAL_H
#ifndef THREAD_H
#define THREAD_H
#include "config.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdbool.h>
#include "gcc_extensions.h"
#include "thread.h"
#include <stdio.h>
#include "panic.h"
#include "debug.h"
/*
* We need more stack when we run under a host
@@ -48,23 +46,6 @@ struct regs
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS */
#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
volatile unsigned char myl[NUM_CORES];
volatile unsigned char turn;
} __attribute__((packed));
/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
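The corelock being dropped from this header is the PP mutual-exclusion primitive: with no reliable atomic instruction on these dual-core PortalPlayer chips, locking falls back on Peterson's algorithm, which needs only ordinary loads and stores. A textbook two-core sketch of the idea (not the firmware's hand-tuned implementation, and ignoring the timing details the real code must respect):

    void peterson_lock(struct corelock *cl, unsigned int core)
    {
        unsigned int other = 1 - core;
        cl->myl[core] = 1;                       /* declare interest */
        cl->turn = other;                        /* yield the turn */
        while (cl->myl[other] && cl->turn == other)
            ;                                    /* spin until free */
    }

    void peterson_unlock(struct corelock *cl, unsigned int core)
    {
        cl->myl[core] = 0;                       /* retract interest */
    }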
/* NOTE: The use of the word "queue" may also refer to a linked list of
threads being maintained that are normally dealt with in FIFO order
and not necessarily kernel event_queue */
@@ -84,12 +65,35 @@ enum
TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
#if NUM_CORES > 1
/* Pointer value for name field to indicate thread is being killed. Using
* an alternate STATE_* won't work since that would interfere with operation
* while the thread is still running. */
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
/* Quick-disinherit of priority elevation. Must be a running thread. */
void priority_disinherit(struct thread_entry *thread, struct blocker *bl);
struct priority_distribution
{
uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
priobit_t mask; /* Bitmask of hist entries that are not zero */
};
#endif /* HAVE_PRIORITY_SCHEDULING */
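priority_distribution is the bookkeeping behind priority inheritance: hist[] counts how many relevant threads occupy each priority and mask flags the non-empty rows, so the effective priority is simply the first set bit. A sketch of the typical updates, using assumed helper names from the priobit bit-array family (only priobit_ffs and priobit_popcount are visible in this diff):

    static inline void prio_add(struct priority_distribution *pd, int pr)
    {
        if (pd->hist[pr]++ == 0)
            priobit_set_bit(&pd->mask, pr);    /* row became occupied */
    }

    static inline void prio_subtract(struct priority_distribution *pd, int pr)
    {
        if (--pd->hist[pr] == 0)
            priobit_clear_bit(&pd->mask, pr);  /* row emptied */
    }

    /* effective priority = numerically lowest occupied row:
       int prio = priobit_ffs(&pd->mask); */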
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
enum
{
TBOP_CLEAR = 0, /* No operation to do */
TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
};
struct thread_blk_ops
{
struct corelock *cl_p; /* pointer to corelock */
unsigned char flags; /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Link information for lists thread is in */
struct thread_entry; /* forward */
@@ -99,44 +103,6 @@ struct thread_list
struct thread_entry *next; /* Next thread in a list */
};
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
{
struct thread_entry * volatile thread; /* thread blocking other threads
(aka. object owner) */
int priority; /* highest priority waiter */
struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
};
/* Choices of wakeup protocol */
/* For transfer of object ownership by one thread to another thread by
* the owning thread itself (mutexes) */
struct thread_entry *
wakeup_priority_protocol_transfer(struct thread_entry *thread);
/* For release by owner where ownership doesn't change - other threads,
* interrupts, timeouts, etc. (mutex timeout, queues) */
struct thread_entry *
wakeup_priority_protocol_release(struct thread_entry *thread);
struct priority_distribution
{
uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
uint32_t mask; /* Bitmask of hist entries that are not zero */
};
#endif /* HAVE_PRIORITY_SCHEDULING */
/* Information kept in each thread slot
* members are arranged according to size - largest first - in order
* to ensure both alignment and packing at the same time.
@@ -183,6 +149,8 @@ struct thread_entry
volatile intptr_t retval; /* Return value from a blocked operation/
misc. use */
#endif
uint32_t id; /* Current slot id */
int __errno; /* Thread error number (errno tls) */
#ifdef HAVE_PRIORITY_SCHEDULING
/* Priority summary of owned objects that support inheritance */
struct blocker *blocker; /* Pointer to blocker when this thread is blocked
@@ -198,7 +166,6 @@ struct thread_entry
unsigned char priority; /* Scheduled priority (higher of base or
all threads blocked by this one) */
#endif
uint16_t id; /* Current slot id */
unsigned short stack_size; /* Size of stack in bytes */
unsigned char state; /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -209,30 +176,6 @@ struct thread_entry
#endif
};
/*** Macros for internal use ***/
/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK 0xff00
#define THREAD_ID_SLOT_MASK 0x00ff
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
enum
{
TBOP_CLEAR = 0, /* No operation to do */
TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
};
struct thread_blk_ops
{
struct corelock *cl_p; /* pointer to corelock */
unsigned char flags; /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Information kept for each core
* Members are arranged for the same reason as in thread_entry
*/
@@ -256,61 +199,45 @@ struct core_entry
#endif /* NUM_CORES */
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...) __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK 0xffffff00
#define THREAD_ID_SLOT_MASK 0x000000ff
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
#define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK)
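Note the ID layout also widens here, from the old 16-bit |VVVVVVVV|SSSSSSSS| form deleted above to 32 bits with a 24-bit version. The version exists to invalidate stale handles: each time a slot is recycled its version bumps, so an old ID naming the same slot no longer compares equal. A sketch of the bump (new_thread_id() itself is not shown in this diff; the wrap handling is an assumption consistent with the "0x00000100..0xffffff00" comment in thread.c):

    uint32_t version = (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
                           & THREAD_ID_VERSION_MASK;
    if (version == 0)                /* wrapped: skip the reserved 0 */
        version = THREAD_ID_INIT(0) & THREAD_ID_VERSION_MASK;
    thread->id = version | slot;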
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));
/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ unsigned int _core = (thread)->core; \
cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else /* NUM_CORES == 1*/
#define LOCK_THREAD(thread) \
({ (void)(thread); })
#define TRY_LOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ (void)(thread); })
#endif /* NUM_CORES */
/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;
/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
unsigned int create_thread(void (*function)(void),
void* stack, size_t stack_size,
unsigned flags, const char *name
IF_PRIO(, int priority)
IF_COP(, unsigned int core));
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
* Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
* next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitely woken or
* the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);
/* Blocks the current thread on a thread queue (< 0 == infinite) */
void block_thread(struct thread_entry *current, int timeout);
/* Return bit flags for thread wakeup */
#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
@@ -322,12 +249,25 @@ void block_thread_w_tmo(struct thread_entry *current, int timeout);
unsigned int thread_queue_wake(struct thread_entry **list);
/* Wakeup a thread at the head of a list */
unsigned int wakeup_thread(struct thread_entry **list);
enum wakeup_thread_protocol
{
WAKEUP_DEFAULT,
WAKEUP_TRANSFER,
WAKEUP_RELEASE,
WAKEUP_TRANSFER_MULTI,
};
unsigned int wakeup_thread_(struct thread_entry **list
IF_PRIO(, enum wakeup_thread_protocol proto));
#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#define wakeup_thread(list, proto) \
wakeup_thread_((list), (proto))
#else /* !HAVE_PRIORITY_SCHEDULING */
#define wakeup_thread(list, proto...) \
wakeup_thread_((list));
#endif /* HAVE_PRIORITY_SCHEDULING */
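The protocol argument only means something on priority builds, and this macro pair keeps call sites identical either way: with priority scheduling the protocol is forwarded, and without it the GNU named-variadic proto... parameter absorbs and discards the argument at preprocessing time. So kernel objects can always write the same thing (a hedged sketch; the mutex hand-off site is an assumed example, not shown in this diff):

    /* e.g. a mutex owner handing the lock to the next waiter */
    unsigned int rc = wakeup_thread(&mtx->queue, WAKEUP_TRANSFER);
    if (rc & THREAD_SWITCH)
        switch_thread();   /* a higher-priority thread was woken */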
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
@@ -339,19 +279,14 @@ unsigned int switch_core(unsigned int new_core);
/* Return the id of the calling thread. */
unsigned int thread_self(void);
/* Return the thread_entry for the calling thread.
* INTERNAL: Intended for use by kernel and not for programs. */
/* Return the thread_entry for the calling thread */
struct thread_entry* thread_self_entry(void);
/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
void thread_get_name(char *buffer, int size,
struct thread_entry *thread);
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
#endif /* THREAD_H */
#endif /* THREAD_INTERNAL_H */

View File

@@ -28,11 +28,7 @@
#undef _FORTIFY_SOURCE
#endif
#include <stdbool.h>
#include <stdio.h>
#include "thread.h"
#include "panic.h"
#include "system.h"
#include "thread-internal.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
@@ -40,8 +36,6 @@
#include <profile.h>
#endif
#include "core_alloc.h"
#include "gcc_extensions.h"
#include "corelock.h"
/****************************************************************************
* ATTENTION!! *
@@ -131,7 +125,6 @@
/* Cast to the machine pointer size, whose size could be < 4 or > 32
* (someday :). */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
static struct core_entry cores[NUM_CORES] IBSS_ATTR;
struct thread_entry threads[MAXTHREADS] IBSS_ATTR;
@@ -204,57 +197,36 @@ void switch_thread(void)
* End Processor-specific section
***************************************************************************/
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
static NO_INLINE
void thread_panicf(const char *msg, struct thread_entry *thread)
{
IF_COP( const unsigned int core = thread->core; )
static char name[32];
thread_get_name(name, 32, thread);
static char namebuf[sizeof (((struct thread_debug_info *)0)->name)];
const char *name = thread->name;
if (!name)
name = "";
snprintf(namebuf, sizeof (namebuf), *name ? "%s" : "%s%08lX",
name, (unsigned long)thread->id);
panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}
static void thread_stkov(struct thread_entry *thread)
{
thread_panicf("Stkov", thread);
}
#if THREAD_EXTRA_CHECKS
#define THREAD_PANICF(msg, thread) \
thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(struct thread_entry *thread)
{
IF_COP( const unsigned int core = thread->core; )
static char name[32];
thread_get_name(name, 32, thread);
panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}
#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#define THREAD_PANICF(msg, thread) \
do {} while (0)
#define THREAD_ASSERT(exp, msg, thread) \
do {} while (0)
#endif /* THREAD_EXTRA_CHECKS */
/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ unsigned int _core = (thread)->core; \
cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else
#define LOCK_THREAD(thread) \
({ (void)(thread); })
#define TRY_LOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ (void)(thread); })
#endif
/* RTR list */
#define RTR_LOCK(core) \
({ corelock_lock(&cores[core].rtr_cl); })
@@ -993,27 +965,6 @@ static void wakeup_thread_release(struct thread_entry *thread)
inherit_priority(bl, bl, blt, newblpr);
}
/*---------------------------------------------------------------------------
* No threads must be blocked waiting for this thread except for it to exit.
* The alternative is more elaborate cleanup and object registration code.
* Check this for risk of silent data corruption when objects with
* inheritable blocking are abandoned by the owner - not precise but may
* catch something.
*---------------------------------------------------------------------------
*/
static void __attribute__((noinline)) check_for_obj_waiters(
const char *function, struct thread_entry *thread)
{
/* Only one bit in the mask should be set with a frequency of 1, which
* represents the thread's own base priority */
if (priobit_popcount(&thread->pdist.mask) != 1 ||
thread->pdist.hist[priobit_ffs(&thread->pdist.mask)] > 1)
{
unsigned char name[32];
thread_get_name(name, 32, thread);
panicf("%s->%s with obj. waiters", function, name);
}
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
@@ -1519,31 +1470,6 @@ void block_thread(struct thread_entry *current, int timeout)
#endif /* HAVE_PRIORITY_SCHEDULING */
}
/*---------------------------------------------------------------------------
* Wakeup an entire queue of threads - returns bitwise-or of return bitmask
* from each operation or THREAD_NONE if nothing was awakened. Object owning
* the queue must be locked first.
*
* INTERNAL: Intended for use by kernel objects and not for programs.
*---------------------------------------------------------------------------
*/
unsigned int thread_queue_wake(struct thread_entry **list)
{
unsigned result = THREAD_NONE;
for (;;)
{
unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
if (rc == THREAD_NONE)
break; /* No more threads */
result |= rc;
}
return result;
}
/*---------------------------------------------------------------------------
* Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
*---------------------------------------------------------------------------
@@ -1580,7 +1506,7 @@ static struct thread_entry * find_empty_thread_slot(void)
struct thread_entry *t = &threads[n];
LOCK_THREAD(t);
if (t->state == STATE_KILLED IF_COP( && t->name != THREAD_DESTRUCT ))
if (t->state == STATE_KILLED)
{
/* Slot is empty - leave it locked and caller will unlock */
thread = t;
@@ -1836,21 +1762,14 @@ void thread_exit(void)
corelock_lock(&current->waiter_cl);
LOCK_THREAD(current);
#if defined (ALLOW_REMOVE_THREAD) && NUM_CORES > 1
if (current->name == THREAD_DESTRUCT)
{
/* Thread being killed - become a waiter */
unsigned int id = current->id;
UNLOCK_THREAD(current);
corelock_unlock(&current->waiter_cl);
thread_wait(id);
THREAD_PANICF("thread_exit->WK:*R", current);
}
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
check_for_obj_waiters("thread_exit", current);
#endif
/* Only one bit in the mask should be set with a frequency of 1, which
* represents the thread's own base priority; otherwise threads are waiting
* on an abandoned object */
if (priobit_popcount(&current->pdist.mask) != 1 ||
current->pdist.hist[priobit_ffs(&current->pdist.mask)] > 1)
thread_panicf("abandon ship!", current);
#endif /* HAVE_PRIORITY_SCHEDULING */
if (current->tmo.prev != NULL)
{
@@ -1872,186 +1791,6 @@ void thread_exit(void)
thread_final_exit(current);
}
#ifdef ALLOW_REMOVE_THREAD
/*---------------------------------------------------------------------------
* Remove a thread from the scheduler. Not The Right Way to Do Things in
* normal programs.
*
* Parameter is the ID as returned from create_thread().
*
* Use with care on threads that are not under careful control as this may
* leave various objects in an undefined state.
*---------------------------------------------------------------------------
*/
void remove_thread(unsigned int thread_id)
{
#ifdef HAVE_CORELOCK_OBJECT
/* core is not constant here because of core switching */
unsigned int core = CURRENT_CORE;
unsigned int old_core = NUM_CORES;
struct corelock *ocl = NULL;
#else
const unsigned int core = CURRENT_CORE;
#endif
struct thread_entry *current = cores[core].running;
struct thread_entry *thread = thread_id_entry(thread_id);
unsigned state;
int oldlevel;
if (thread == current)
thread_exit(); /* Current thread - do normal exit */
oldlevel = disable_irq_save();
corelock_lock(&thread->waiter_cl);
LOCK_THREAD(thread);
state = thread->state;
if (thread->id != thread_id || state == STATE_KILLED)
goto thread_killed;
#if NUM_CORES > 1
if (thread->name == THREAD_DESTRUCT)
{
/* Thread being killed - become a waiter */
UNLOCK_THREAD(thread);
corelock_unlock(&thread->waiter_cl);
restore_irq(oldlevel);
thread_wait(thread_id);
return;
}
thread->name = THREAD_DESTRUCT; /* Slot can't be used for now */
#ifdef HAVE_PRIORITY_SCHEDULING
check_for_obj_waiters("remove_thread", thread);
#endif
if (thread->core != core)
{
/* Switch cores and safely extract the thread there */
/* Slot HAS to be unlocked or a deadlock could occur which means other
* threads have to be guided into becoming thread waiters if they
* attempt to remove it. */
unsigned int new_core = thread->core;
corelock_unlock(&thread->waiter_cl);
UNLOCK_THREAD(thread);
restore_irq(oldlevel);
old_core = switch_core(new_core);
oldlevel = disable_irq_save();
corelock_lock(&thread->waiter_cl);
LOCK_THREAD(thread);
state = thread->state;
core = new_core;
/* Perform the extraction and switch ourselves back to the original
processor */
}
#endif /* NUM_CORES > 1 */
if (thread->tmo.prev != NULL)
{
/* Clean thread off the timeout list if a timeout check hasn't
* run yet */
remove_from_list_tmo(thread);
}
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* Cancel CPU boost if any */
boost_thread(thread, false);
#endif
IF_COP( retry_state: )
switch (state)
{
case STATE_RUNNING:
RTR_LOCK(core);
/* Remove thread from ready to run tasks */
remove_from_list_l(&cores[core].running, thread);
rtr_subtract_entry(core, thread->priority);
RTR_UNLOCK(core);
break;
case STATE_BLOCKED:
case STATE_BLOCKED_W_TMO:
/* Remove thread from the queue it's blocked on - including its
* own if waiting there */
#if NUM_CORES > 1
if (&thread->waiter_cl != thread->obj_cl)
{
ocl = thread->obj_cl;
if (UNLIKELY(corelock_try_lock(ocl) == 0))
{
UNLOCK_THREAD(thread);
corelock_lock(ocl);
LOCK_THREAD(thread);
if (UNLIKELY(thread->state != state))
{
/* Something woke the thread */
state = thread->state;
corelock_unlock(ocl);
goto retry_state;
}
}
}
#endif
#ifdef HAVE_WAKEUP_EXT_CB
if (thread->wakeup_ext_cb != NULL)
thread->wakeup_ext_cb(thread);
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
/* Remove thread's priority influence from its chain if needed */
if (thread->blocker != NULL)
wakeup_priority_protocol_release(thread);
else
#endif
remove_from_list_l(thread->bqp, thread);
#if NUM_CORES > 1
if (ocl != NULL)
corelock_unlock(ocl);
#endif
break;
/* Otherwise thread is frozen and hasn't run yet */
}
new_thread_id(thread_id, thread);
thread->state = STATE_KILLED;
/* If thread was waiting on itself, it will have been removed above.
* The wrong order would result in waking the thread first and deadlocking
* since the slot is already locked. */
thread_queue_wake(&thread->queue);
thread->name = NULL;
thread_killed: /* Thread was already killed */
/* Removal complete - safe to unlock and reenable interrupts */
corelock_unlock(&thread->waiter_cl);
UNLOCK_THREAD(thread);
restore_irq(oldlevel);
#if NUM_CORES > 1
if (old_core < NUM_CORES)
{
/* Did a removal on another processor's thread - switch back to
native core */
switch_core(old_core);
}
#endif
}
#endif /* ALLOW_REMOVE_THREAD */
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
* Sets the thread's relative base priority for the core it runs on. Any
@@ -2205,20 +1944,9 @@ unsigned int switch_core(unsigned int new_core)
return core;
}
int oldlevel = disable_irq_save();
disable_irq();
LOCK_THREAD(current);
if (current->name == THREAD_DESTRUCT)
{
/* Thread being killed - deactivate and let process complete */
unsigned int id = current->id;
UNLOCK_THREAD(current);
restore_irq(oldlevel);
thread_wait(id);
/* Should never be reached */
THREAD_PANICF("switch_core->D:*R", current);
}
/* Get us off the running list for the current core */
RTR_LOCK(core);
remove_from_list_l(&cores[core].running, current);
@@ -2274,7 +2002,7 @@ unsigned int switch_core(unsigned int new_core)
* are safe to perform.
*---------------------------------------------------------------------------
*/
void init_threads(void)
void INIT_ATTR init_threads(void)
{
const unsigned int core = CURRENT_CORE;
struct thread_entry *thread;
@@ -2353,82 +2081,6 @@ void init_threads(void)
#endif
}
/* Shared stack scan helper for thread_stack_usage and idle_stack_usage */
#if NUM_CORES == 1
static inline int stack_usage(uintptr_t *stackptr, size_t stack_size)
#else
static int stack_usage(uintptr_t *stackptr, size_t stack_size)
#endif
{
unsigned int stack_words = stack_size / sizeof (uintptr_t);
unsigned int i;
int usage = 0;
for (i = 0; i < stack_words; i++)
{
if (stackptr[i] != DEADBEEF)
{
usage = ((stack_words - i) * 100) / stack_words;
break;
}
}
return usage;
}
/*---------------------------------------------------------------------------
* Returns the maximum percentage of stack a thread ever used while running.
* NOTE: Some large buffer allocations that don't use enough of the buffer to
* overwrite stackptr[0] will not be seen.
*---------------------------------------------------------------------------
*/
int thread_stack_usage(const struct thread_entry *thread)
{
if (LIKELY(thread->stack_size > 0))
return stack_usage(thread->stack, thread->stack_size);
return 0;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the core's idle stack ever used during
* runtime.
*---------------------------------------------------------------------------
*/
int idle_stack_usage(unsigned int core)
{
return stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
}
#endif
/*---------------------------------------------------------------------------
* Fills in the buffer with the specified thread's name. If the name is NULL,
* empty, or the thread is in destruct state a formatted ID is written
* instead.
*---------------------------------------------------------------------------
*/
void thread_get_name(char *buffer, int size,
struct thread_entry *thread)
{
if (size <= 0)
return;
*buffer = '\0';
if (thread)
{
/* Display thread name if one or ID if none */
const char *name = thread->name;
const char *fmt = "%s";
if (name == NULL IF_COP(|| name == THREAD_DESTRUCT) || *name == '\0')
{
name = (const char *)(uintptr_t)thread->id;
fmt = "%04lX";
}
snprintf(buffer, size, fmt, name);
}
}
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false

View File

@@ -1,4 +1,4 @@
#include "thread.h"
#include "../thread-internal.h"
int * __errno(void)
{
return &thread_self_entry()->__errno;
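This keeps errno thread-local through the internal header: __errno() hands back a pointer into the calling thread's slot. For context, the usual libc wiring on top of such a function is a macro like the following (a sketch of the conventional pattern; the actual errno.h definition is not part of this commit, and the I/O snippet is illustrative):

    #define errno (*__errno())   /* each thread dereferences its own slot */

    /* so ordinary code is transparently thread-safe: */
    if (read(fd, buf, len) < 0 && errno == EAGAIN)
        wait_for_data();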

View File

@@ -45,7 +45,7 @@ extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
extern uintptr_t cop_idlestackend[];
static uintptr_t * const idle_stacks[NUM_CORES] =
uintptr_t * const idle_stacks[NUM_CORES] =
{
[CPU] = cpu_idlestackbegin,
[COP] = cop_idlestackbegin
@@ -92,9 +92,7 @@ static inline void NORETURN_ATTR __attribute__((always_inline))
{
asm volatile (
"cmp %1, #0 \n" /* CPU? */
"ldrne r0, =commit_dcache \n" /* No? write back data */
"movne lr, pc \n"
"bxne r0 \n"
"blne commit_dcache \n"
"mov r0, %0 \n" /* copy thread parameter */
"mov sp, %2 \n" /* switch to idle stack */
"bl thread_final_exit_do \n" /* finish removal */
@@ -163,9 +161,7 @@ static void __attribute__((naked))
"ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
"mov r1, #0 \n" /* Clear start address */
"str r1, [r0, #40] \n"
"ldr r0, =commit_discard_idcache \n" /* Invalidate new core's cache */
"mov lr, pc \n"
"bx r0 \n"
"bl commit_discard_idcache \n" /* Invalidate new core's cache */
"ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
: : "i"(IDLE_STACK_WORDS)
);

View File

@@ -28,10 +28,7 @@
#include <setjmp.h>
#include "system-sdl.h"
#include "thread-sdl.h"
#include "system.h"
#include "kernel.h"
#include "thread.h"
#include "debug.h"
#include "../kernel-internal.h"
#include "core_alloc.h"
/* Define this as 1 to show informational messages that are not errors. */
@@ -165,6 +162,7 @@ static struct thread_entry * find_empty_thread_slot(void)
/* Initialize SDL threading */
void init_threads(void)
{
static uintptr_t main_stack[] = { DEADBEEF, 0 };
struct thread_entry *thread;
int n;
@@ -187,8 +185,8 @@ void init_threads(void)
then create the SDL thread - it is possible to have a quick, early
shutdown try to access the structure. */
thread = &threads[0];
thread->stack = (uintptr_t *)" ";
thread->stack_size = 8;
thread->stack = main_stack;
thread->stack_size = sizeof (main_stack);
thread->name = "main";
thread->state = STATE_RUNNING;
thread->context.s = SDL_CreateSemaphore(0);
@@ -439,23 +437,6 @@ unsigned int wakeup_thread_(struct thread_entry **list)
return THREAD_NONE;
}
unsigned int thread_queue_wake(struct thread_entry **list)
{
unsigned int result = THREAD_NONE;
for (;;)
{
unsigned int rc = wakeup_thread_(list);
if (rc == THREAD_NONE)
break;
result |= rc;
}
return result;
}
void thread_thaw(unsigned int thread_id)
{
struct thread_entry *thread = thread_id_entry(thread_id);
@@ -542,6 +523,10 @@ unsigned int create_thread(void (*function)(void),
return 0;
}
unsigned int stack_words = stack_size / sizeof (uintptr_t);
for (unsigned int i = stack_words; i-- > 0;)
((uintptr_t *)stack)[i] = DEADBEEF;
thread->stack = stack;
thread->stack_size = stack_size;
thread->name = name;
@@ -557,11 +542,7 @@ unsigned int create_thread(void (*function)(void),
return thread->id;
}
#ifndef ALLOW_REMOVE_THREAD
static void remove_thread(unsigned int thread_id)
#else
void remove_thread(unsigned int thread_id)
#endif
{
struct thread_entry *current = cores[CURRENT_CORE].running;
struct thread_entry *thread = thread_id_entry(thread_id);
@@ -657,41 +638,6 @@ void thread_wait(unsigned int thread_id)
}
}
int thread_stack_usage(const struct thread_entry *thread)
{
return 50;
(void)thread;
}
/* Return name if one or ID if none */
void thread_get_name(char *buffer, int size,
struct thread_entry *thread)
{
if (size <= 0)
return;
*buffer = '\0';
if (thread)
{
/* Display thread name if one or ID if none */
bool named = thread->name && *thread->name;
const char *fmt = named ? "%s" : "%04lX";
intptr_t name = named ?
(intptr_t)thread->name : (intptr_t)thread->id;
snprintf(buffer, size, fmt, name);
}
}
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
/*---------------------------------------------------------------------------
* Suspends a thread's execution for at least the specified number of ticks.
*
@@ -707,11 +653,6 @@ void thread_get_name(char *buffer, int size,
*/
unsigned sleep(unsigned ticks)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (SLEEP_KERNEL_HOOK(ticks))
return 0; /* Handled */
disable_irq();
sleep_thread(ticks);
switch_thread();
@@ -725,10 +666,5 @@ unsigned sleep(unsigned ticks)
*/
void yield(void)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (YIELD_KERNEL_HOOK())
return; /* handled */
switch_thread();
}