Scheduling Concepts
Process Scheduling
Process scheduling selects a process from the ready queue according to some scheduling algorithm and hands it the CPU; it mainly arbitrates the use of the CPU and related resources. The goal is to make the most of the CPU time: as long as there is a runnable process, some process should be executing. Whenever there are more processes than processors, at any given moment some processes inevitably cannot be running.
Process Switching
Process switching changes the current owner of the CPU: the context (the CPU state, chiefly the register contents) is saved into the current process's TCB, and the next process's context is restored.
Process States
A process generally passes through three states during its lifetime:
- Ready: the process has been allocated all necessary resources except the CPU and can execute as soon as it gets the CPU;
- Running: the process is currently executing on the CPU;
- Blocked: a running process that cannot proceed because it is waiting for some resource gives up the CPU and enters the blocked state.
Data Structures
The scheduling-related data structures are:
- Task descriptor: NuttX defines a task with `struct tcb_s`
- Task state: covers the three process states above plus other related states; NuttX defines it with `enum tstate_e`
- Task queues: hold the tasks in each state
Task Descriptor
FAR struct wdog_s; /* Forward reference */
struct tcb_s
{
/* Fields used to support list management *************************************/
/* Doubly linked list pointers that chain TCBs of the same state into a task queue */
FAR struct tcb_s *flink; /* Doubly linked list */
FAR struct tcb_s *blink;
/* Task Group *****************************************************************/
#ifdef HAVE_TASK_GROUP
FAR struct task_group_s *group; /* Pointer to shared task group data */
#endif
/* Task Management Fields *****************************************************/
/* Fields used for task management */
pid_t pid; /* This is the ID of the thread */
start_t start; /* Thread start function */
entry_t entry; /* Entry Point into the thread */
uint8_t sched_priority; /* Current priority of the thread */
uint8_t init_priority; /* Initial priority of the thread */
#ifdef CONFIG_PRIORITY_INHERITANCE
#if CONFIG_SEM_NNESTPRIO > 0
uint8_t npend_reprio; /* Number of nested reprioritizations */
uint8_t pend_reprios[CONFIG_SEM_NNESTPRIO];
#endif
uint8_t base_priority; /* "Normal" priority of the thread */
#endif
/* Current task state; see the state enumeration described below */
uint8_t task_state; /* Current state of the thread */
#ifdef CONFIG_SMP
uint8_t cpu; /* CPU index if running or assigned */
cpu_set_t affinity; /* Bit set of permitted CPUs */
#endif
uint16_t flags; /* Misc. general status flags */
int16_t lockcount; /* 0=preemptable (not-locked) */
#ifdef CONFIG_SMP
int16_t irqcount; /* 0=interrupts enabled */
#endif
#ifdef CONFIG_CANCELLATION_POINTS
int16_t cpcount; /* Nested cancellation point count */
#endif
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
int32_t timeslice; /* RR timeslice OR Sporadic budget */
/* interval remaining */
#endif
#ifdef CONFIG_SCHED_SPORADIC
FAR struct sporadic_s *sporadic; /* Sporadic scheduling parameters */
#endif
FAR struct wdog_s *waitdog; /* All timed waits use this timer */
/* Each task has its own stack region */
/* Stack-Related Fields *******************************************************/
size_t adj_stack_size; /* Stack size after adjustment */
/* for hardware, processor, etc. */
/* (for debug purposes only) */
FAR void *stack_alloc_ptr; /* Pointer to allocated stack */
/* Need to deallocate stack */
FAR void *adj_stack_ptr; /* Adjusted stack_alloc_ptr for HW */
/* The initial stack pointer value */
/* External Module Support ****************************************************/
#ifdef CONFIG_PIC
FAR struct dspace_s *dspace; /* Allocated area for .bss and .data */
#endif
/* POSIX Semaphore Control Fields *********************************************/
sem_t *waitsem; /* Semaphore ID waiting on */
/* POSIX Signal Control Fields ************************************************/
#ifndef CONFIG_DISABLE_SIGNALS
sigset_t sigprocmask; /* Signals that are blocked */
sigset_t sigwaitmask; /* Waiting for pending signals */
sq_queue_t sigpendactionq; /* List of pending signal actions */
sq_queue_t sigpostedq; /* List of posted signals */
siginfo_t sigunbinfo; /* Signal info when task unblocked */
#endif
/* POSIX Named Message Queue Fields *******************************************/
#ifndef CONFIG_DISABLE_MQUEUE
FAR struct mqueue_inode_s *msgwaitq; /* Waiting for this message queue */
#endif
/* Library related fields *****************************************************/
int pterrno; /* Current per-thread errno */
/* State save areas ***********************************************************/
/* The form and content of these fields are platform-specific. */
struct xcptcontext xcp; /* Interrupt register save area */
#if CONFIG_TASK_NAME_SIZE > 0
char name[CONFIG_TASK_NAME_SIZE+1]; /* Task name (with NUL terminator) */
#endif
};
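The flink/blink pointers at the top of the structure are what let a TCB sit on the doubly linked task queues described below. As a rough sketch (illustrative only, not NuttX source), walking the ready-to-run queue looks like this:
/* Illustrative sketch: walk the ready-to-run queue through the flink
 * pointers. g_readytorun is declared in the "Task Queues" subsection
 * below. */
static void dump_readytorun(void)
{
  FAR struct tcb_s *tcb;

  for (tcb = (FAR struct tcb_s *)g_readytorun.head;
       tcb != NULL;
       tcb = tcb->flink)
    {
      sinfo("pid=%d pri=%d state=%d\n",
            tcb->pid, tcb->sched_priority, tcb->task_state);
    }
}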
Within `struct tcb_s`, one structure deserves a closer look: `struct task_group_s`, which describes a task group. It holds child exit status, environment variables, file descriptors, sockets, and so on; this corresponds to the information the Linux task descriptor carries itself.
/* struct task_group_s ***********************************************************/
/* All threads created by pthread_create belong in the same task group (along with
* the thread of the original task). struct task_group_s is a shared structure
* referenced by the TCB of each thread that is a member of the task group.
*
* This structure should contain *all* resources shared by tasks and threads that
* belong to the same task group:
*
* Child exit status
* Environment variables
* PIC data space and address environments
* File descriptors
* FILE streams
* Sockets
* Address environments.
*
* Each instance of struct task_group_s is reference counted. Each instance is
* created with a reference count of one. The reference count is incremented
* when each thread joins the group and decremented when each thread exits,
* leaving the group. When the reference count decrements to zero, the
* struct task_group_s is freed.
*/
#ifdef HAVE_TASK_GROUP
#ifndef CONFIG_DISABLE_PTHREAD
struct join_s; /* Forward reference */
/* Defined in sched/pthread/pthread.h */
#endif
struct task_group_s
{
#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
struct task_group_s *flink; /* Supports a singly linked list */
gid_t tg_gid; /* The ID of this task group */
#endif
#ifdef HAVE_GROUP_MEMBERS
gid_t tg_pgid; /* The ID of the parent task group */
#endif
#if !defined(CONFIG_DISABLE_PTHREAD) && defined(CONFIG_SCHED_HAVE_PARENT)
pid_t tg_task; /* The ID of the task within the group */
#endif
uint8_t tg_flags; /* See GROUP_FLAG_* definitions */
/* Group membership ***********************************************************/
uint8_t tg_nmembers; /* Number of members in the group */
#ifdef HAVE_GROUP_MEMBERS
uint8_t tg_mxmembers; /* Number of members in allocation */
FAR pid_t *tg_members; /* Members of the group */
#endif
#if defined(CONFIG_SCHED_ATEXIT) && !defined(CONFIG_SCHED_ONEXIT)
/* atexit support ************************************************************/
# if defined(CONFIG_SCHED_ATEXIT_MAX) && CONFIG_SCHED_ATEXIT_MAX > 1
atexitfunc_t tg_atexitfunc[CONFIG_SCHED_ATEXIT_MAX];
# else
atexitfunc_t tg_atexitfunc; /* Called when exit is called. */
# endif
#endif
#ifdef CONFIG_SCHED_ONEXIT
/* on_exit support ***********************************************************/
# if defined(CONFIG_SCHED_ONEXIT_MAX) && CONFIG_SCHED_ONEXIT_MAX > 1
onexitfunc_t tg_onexitfunc[CONFIG_SCHED_ONEXIT_MAX];
FAR void *tg_onexitarg[CONFIG_SCHED_ONEXIT_MAX];
# else
onexitfunc_t tg_onexitfunc; /* Called when exit is called. */
FAR void *tg_onexitarg; /* The argument passed to the function */
# endif
#endif
#ifdef CONFIG_SCHED_HAVE_PARENT
/* Child exit status **********************************************************/
#ifdef CONFIG_SCHED_CHILD_STATUS
FAR struct child_status_s *tg_children; /* Head of a list of child status */
#endif
#ifndef HAVE_GROUP_MEMBERS
/* REVISIT: What if parent thread exits? Should use tg_pgid. */
pid_t tg_ppid; /* This is the ID of the parent thread */
#ifndef CONFIG_SCHED_CHILD_STATUS
uint16_t tg_nchildren; /* This is the number active children */
#endif
#endif /* HAVE_GROUP_MEMBERS */
#endif /* CONFIG_SCHED_HAVE_PARENT */
#if defined(CONFIG_SCHED_WAITPID) && !defined(CONFIG_SCHED_HAVE_PARENT)
/* waitpid support ************************************************************/
/* Simple mechanism used only when there is no support for SIGCHLD */
uint8_t tg_nwaiters; /* Number of waiters */
sem_t tg_exitsem; /* Support for waitpid */
int *tg_statloc; /* Location to return exit status */
#endif
#ifndef CONFIG_DISABLE_PTHREAD
/* Pthreads *******************************************************************/
/* Pthread join Info: */
sem_t tg_joinsem; /* Mutually exclusive access to join data */
FAR struct join_s *tg_joinhead; /* Head of a list of join data */
FAR struct join_s *tg_jointail; /* Tail of a list of join data */
uint8_t tg_nkeys; /* Number pthread keys allocated */
#endif
#ifndef CONFIG_DISABLE_SIGNALS
/* POSIX Signal Control Fields ************************************************/
sq_queue_t tg_sigactionq; /* List of actions for signals */
sq_queue_t tg_sigpendingq; /* List of pending signals */
#endif
#ifndef CONFIG_DISABLE_ENVIRON
/* Environment variables ******************************************************/
size_t tg_envsize; /* Size of environment string allocation */
FAR char *tg_envp; /* Allocated environment strings */
#endif
/* PIC data space and address environments ************************************/
/* Logically the PIC data space belongs here (see struct dspace_s). The
* current logic needs review: There are differences in the way that the
* life of the PIC data is managed.
*/
#if CONFIG_NFILE_DESCRIPTORS > 0
/* File descriptors ***********************************************************/
struct filelist tg_filelist; /* Maps file descriptor to file */
#endif
#if CONFIG_NFILE_STREAMS > 0
/* FILE streams ***************************************************************/
/* In a flat, single-heap build, the stream list is allocated with this
* structure. But in kernel mode with a kernel allocator, it must be separately
* allocated using a user-space allocator.
*/
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
defined(CONFIG_MM_KERNEL_HEAP)
FAR struct streamlist *tg_streamlist;
#else
struct streamlist tg_streamlist; /* Holds C buffered I/O info */
#endif
#endif
#if CONFIG_NSOCKET_DESCRIPTORS > 0
/* Sockets ********************************************************************/
struct socketlist tg_socketlist; /* Maps socket descriptor to socket */
#endif
#ifndef CONFIG_DISABLE_MQUEUE
/* POSIX Named Message Queue Fields *******************************************/
sq_queue_t tg_msgdesq; /* List of opened message queues */
#endif
#ifdef CONFIG_ARCH_ADDRENV
/* Address Environment ********************************************************/
group_addrenv_t tg_addrenv; /* Task group address environment */
#endif
#ifdef CONFIG_MM_SHM
/* Shared Memory **************************************************************/
struct group_shm_s tg_shm; /* Task shared memory logic */
#endif
};
#endif
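The reference-counting life cycle described in the comment above can be sketched as follows. The helper names are hypothetical; in NuttX the tg_nmembers field plays this role, and the real logic lives under sched/group/:
/* Hypothetical helpers sketching the group reference count */
static void group_ref(FAR struct task_group_s *group)
{
  group->tg_nmembers++;            /* A thread joined the group */
}

static void group_unref(FAR struct task_group_s *group)
{
  if (--group->tg_nmembers == 0)   /* The last thread left the group */
    {
      kmm_free(group);             /* Free the shared structure */
    }
}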
Two further structures extend `struct tcb_s`, describing tasks and pthreads respectively:
/* struct task_tcb_s *************************************************************/
/* This is the particular form of the task control block (TCB) structure used by
* tasks (and kernel threads). There are two TCB forms: one for pthreads and
* one for tasks. Both share the common TCB fields (which must appear at the
* top of the structure) plus additional fields unique to tasks and threads.
* Having separate structures for tasks and pthreads adds some complexity, but
* saves memory in that it prevents pthreads from being burdened with the
* overhead required for tasks (and vice versa).
*/
struct task_tcb_s
{
/* Common TCB fields **********************************************************/
struct tcb_s cmn; /* Common TCB fields */
/* Task Management Fields *****************************************************/
#ifdef CONFIG_SCHED_STARTHOOK
starthook_t starthook; /* Task startup function */
FAR void *starthookarg; /* The argument passed to the function */
#endif
/* [Re-]start name + start-up parameters **************************************/
FAR char **argv; /* Name+start-up parameters */
};
/* struct pthread_tcb_s **********************************************************/
/* This is the particular form of the task control block (TCB) structure used by
* pthreads. There are two TCB forms: one for pthreads and one for tasks. Both
* share the common TCB fields (which must appear at the top of the structure)
* plus additional fields unique to tasks and threads. Having separate structures
* for tasks and pthreads adds some complexity, but saves memory in that it
* prevents pthreads from being burdened with the overhead required for tasks
* (and vice versa).
*/
#ifndef CONFIG_DISABLE_PTHREAD
struct pthread_tcb_s
{
/* Common TCB fields **********************************************************/
struct tcb_s cmn; /* Common TCB fields */
/* Task Management Fields *****************************************************/
pthread_addr_t arg; /* Startup argument */
FAR void *joininfo; /* Detach-able info to support join */
/* Clean-up stack *************************************************************/
#ifdef CONFIG_PTHREAD_CLEANUP
/* tos - The index to the next available entry at the top of the stack.
* stack - The pre-allocated clean-up stack memory.
*/
uint8_t tos;
struct pthread_cleanup_s stack[CONFIG_PTHREAD_CLEANUP_STACKSIZE];
#endif
/* POSIX Thread Specific Data *************************************************/
#if CONFIG_NPTHREAD_KEYS > 0
FAR void *pthread_data[CONFIG_NPTHREAD_KEYS];
#endif
};
#endif /* !CONFIG_DISABLE_PTHREAD */
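The tos/stack pair implements the pthread clean-up stack. A simplified sketch of the push side follows; the field names of struct pthread_cleanup_s (pc_cleaner, pc_arg) are assumptions here, and the real implementation lives under sched/pthread/:
/* Simplified sketch of pthread_cleanup_push(); locking and error
 * handling are omitted. */
void pthread_cleanup_push(pthread_cleanup_t routine, FAR void *arg)
{
  FAR struct pthread_tcb_s *tcb = (FAR struct pthread_tcb_s *)this_task();

  /* tos indexes the next available slot at the top of the stack */
  if (tcb->tos < CONFIG_PTHREAD_CLEANUP_STACKSIZE)
    {
      tcb->stack[tcb->tos].pc_cleaner = routine;
      tcb->stack[tcb->tos].pc_arg     = arg;
      tcb->tos++;
    }
}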
Task States
/* General Task Management Types ************************************************/
/* This is the type of the task_state field of the TCB. NOTE: the order and
* content of this enumeration is critical since there are some OS tables indexed
* by these values. The range of values is assumed to fit into a uint8_t in
* struct tcb_s.
*/
enum tstate_e
{
TSTATE_TASK_INVALID = 0, /* INVALID - The TCB is uninitialized */
TSTATE_TASK_PENDING, /* READY_TO_RUN - Pending preemption unlock */
TSTATE_TASK_READYTORUN, /* READY-TO-RUN - But not running */
#ifdef CONFIG_SMP
TSTATE_TASK_ASSIGNED, /* READY-TO-RUN - Not running, but assigned to a CPU */
#endif
TSTATE_TASK_RUNNING, /* READY_TO_RUN - And running */
TSTATE_TASK_INACTIVE, /* BLOCKED - Initialized but not yet activated */
TSTATE_WAIT_SEM, /* BLOCKED - Waiting for a semaphore */
#ifndef CONFIG_DISABLE_SIGNALS
TSTATE_WAIT_SIG, /* BLOCKED - Waiting for a signal */
#endif
#ifndef CONFIG_DISABLE_MQUEUE
TSTATE_WAIT_MQNOTEMPTY, /* BLOCKED - Waiting for a MQ to become not empty. */
TSTATE_WAIT_MQNOTFULL, /* BLOCKED - Waiting for a MQ to become not full. */
#endif
#ifdef CONFIG_PAGING
TSTATE_WAIT_PAGEFILL, /* BLOCKED - Waiting for page fill */
#endif
NUM_TASK_STATES /* Must be last */
};
typedef enum tstate_e tstate_t;
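Each state corresponds to one of the task queues declared in the next subsection. NuttX records this mapping in a table (g_tasklisttable) indexed by tstate_t; an illustrative reduction of that mapping:
/* Illustrative sketch, not the real table: map a task state to the
 * queue holding TCBs in that state. The queues are declared in the
 * "Task Queues" subsection below. */
static FAR volatile dq_queue_t *state_to_list(tstate_t state)
{
  switch (state)
    {
      case TSTATE_TASK_PENDING:    return &g_pendingtasks;
      case TSTATE_TASK_READYTORUN:
      case TSTATE_TASK_RUNNING:    return &g_readytorun;
      case TSTATE_TASK_INACTIVE:   return &g_inactivetasks;
      case TSTATE_WAIT_SEM:        return &g_waitingforsemaphore;
      default:                     return NULL; /* other wait states omitted */
    }
}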
Task Queues
/* Task Lists ***************************************************************/
/* The state of a task is indicated both by the task_state field of the TCB
* and by a series of task lists. All of these tasks lists are declared
* below. Although it is not always necessary, most of these lists are
* prioritized so that common list handling logic can be used (only the
* g_readytorun, the g_pendingtasks, and the g_waitingforsemaphore lists
* need to be prioritized).
*/
/* This is the list of all tasks that are ready to run. This is a
* prioritized list with head of the list holding the highest priority
* (unassigned) task. In the non-SMP case, the head of this list is the
* currently active task and the tail of this list, the lowest priority
* task, is always the IDLE task.
*/
volatile dq_queue_t g_readytorun;
#ifdef CONFIG_SMP
/* In order to support SMP, the function of the g_readytorun list changes.
* The g_readytorun list is still used, but in the SMP case it will contain only:
*
* - Only tasks/threads that are eligible to run, but not currently running,
* and
* - Tasks/threads that have not been assigned to a CPU.
*
* Otherwise, the TCB will be retained in an assigned task list,
* g_assignedtasks. As its name suggests, the g_assignedtasks queue for CPU
* 'n' would contain only tasks/threads that are assigned to CPU 'n'. Tasks/
* threads would be assigned a particular CPU by one of two mechanisms:
*
* - (Semi-)permanently through an RTOS interfaces such as
* pthread_attr_setaffinity(), or
* - Temporarily through scheduling logic when a previously unassigned task
* is made to run.
*
* Tasks/threads that are assigned to a CPU via an interface like
* pthread_attr_setaffinity() would never go into the g_readytorun list, but
* would only go into the g_assignedtasks[n] list for the CPU 'n' to which
* the thread has been assigned. Hence, the g_readytorun list would hold
* only unassigned tasks/threads.
*
* Like the g_readytorun list in the non-SMP case, each g_assignedtask[] list
* is prioritized: The head of the list is the currently active task on this
* CPU. Tasks after the active task are ready-to-run and assigned to this
* CPU. The tail of this assigned task list, the lowest priority task, is
* always the CPU's IDLE task.
*/
volatile dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
#endif
/* This is the list of all tasks that are ready-to-run, but cannot be placed
* in the g_readytorun list because: (1) They are higher priority than the
* currently active task at the head of the g_readytorun list, and (2) the
* currently active task has disabled pre-emption.
*/
volatile dq_queue_t g_pendingtasks;
/* This is the list of all tasks that are blocked waiting for a semaphore */
volatile dq_queue_t g_waitingforsemaphore;
/* This is the list of all tasks that are blocked waiting for a signal */
#ifndef CONFIG_DISABLE_SIGNALS
volatile dq_queue_t g_waitingforsignal;
#endif
/* This is the list of all tasks that are blocked waiting for a message
* queue to become non-empty.
*/
#ifndef CONFIG_DISABLE_MQUEUE
volatile dq_queue_t g_waitingformqnotempty;
#endif
/* This is the list of all tasks that are blocked waiting for a message
* queue to become non-full.
*/
#ifndef CONFIG_DISABLE_MQUEUE
volatile dq_queue_t g_waitingformqnotfull;
#endif
/* This is the list of all tasks that are blocking waiting for a page fill */
#ifdef CONFIG_PAGING
volatile dq_queue_t g_waitingforfill;
#endif
/* This the list of all tasks that have been initialized, but not yet
* activated. NOTE: This is the only list that is not prioritized.
*/
volatile dq_queue_t g_inactivetasks;
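At the data-structure level, scheduling largely amounts to moving a TCB between these queues while keeping task_state consistent. A minimal sketch of blocking a task on the semaphore queue, assuming a non-prioritized insert for brevity (the real logic is in sched_removereadytorun() and related functions):
/* Minimal sketch: move a TCB from the ready-to-run list to the
 * semaphore wait list. The real code inserts by priority;
 * dq_addlast() keeps the illustration short. */
static void block_on_semaphore_sketch(FAR struct tcb_s *tcb)
{
  dq_rem((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_readytorun);
  tcb->task_state = TSTATE_WAIT_SEM;
  dq_addlast((FAR dq_entry_t *)tcb,
             (FAR dq_queue_t *)&g_waitingforsemaphore);
}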
Scheduling Policies
A scheduling policy (also called a scheduling algorithm) is the resource-allocation algorithm prescribed by the system's resource-allocation strategy. In the code, it shows up as moving tasks between the different task queues. NuttX supports the following scheduling algorithms:
- FIFO: first-come, first-served among tasks of equal priority; FIFO can impose long latency on tasks queued behind others.
- Round Robin: time-slicing among tasks of equal priority. For example, if a task is given a 200 ms timeslice, then after running for 200 ms it yields the CPU and the next task of the same priority in the queue runs.
- Sporadic: sporadic scheduling is introduced mainly to bound the impact of periodic and aperiodic events on real-time behavior. Unlike RR, it can cap how long a thread may execute within a configured window. When a system handles both periodic and aperiodic events and is analyzed with Rate Monotonic Analysis, this sporadic algorithm is required.
/* POSIX-like scheduling policies */
#define SCHED_FIFO 1 /* FIFO priority scheduling policy */
#define SCHED_RR 2 /* Round robin scheduling policy */
#define SCHED_SPORADIC 3 /* Sporadic scheduling policy */
#define SCHED_OTHER 4 /* Not supported */
NuttX supports priority-based preemption for real-time behavior. The following interface sets the scheduling policy and priority:
/****************************************************************************
* Name: sched_setscheduler
*
* Description:
* sched_setscheduler() sets both the scheduling policy and the priority
* for the task identified by pid. If pid equals zero, the scheduler of
* the calling task will be set. The parameter 'param' holds the priority
* of the thread under the new policy.
*
* Inputs:
* pid - the task ID of the task to modify. If pid is zero, the calling
* task is modified.
* policy - Scheduling policy requested (either SCHED_FIFO or SCHED_RR)
* param - A structure whose member sched_priority is the new priority.
* The range of valid priority numbers is from SCHED_PRIORITY_MIN
* through SCHED_PRIORITY_MAX.
*
* Return Value:
* On success, sched_setscheduler() returns OK (zero). On error, ERROR
* (-1) is returned, and errno is set appropriately:
*
* EINVAL The scheduling policy is not one of the recognized policies.
* ESRCH The task whose ID is pid could not be found.
*
* Assumptions:
*
****************************************************************************/
int sched_setscheduler(pid_t pid, int policy, FAR const struct sched_param *param)
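For example, a task could switch itself to round-robin scheduling as follows (priority 100 is an arbitrary example value; it must lie between SCHED_PRIORITY_MIN and SCHED_PRIORITY_MAX, and SCHED_RR requires CONFIG_RR_INTERVAL > 0):
#include <sched.h>

/* Example: put the calling task (pid == 0) under SCHED_RR */
int make_self_round_robin(void)
{
  struct sched_param param;

  param.sched_priority = 100;
  return sched_setscheduler(0, SCHED_RR, &param);
}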
Scheduling Points
Scheduling does not happen at arbitrary moments; it can only take place at certain points. The term "scheduler" invites a misconception: that a scheduler runs somewhere, like a kernel thread, carrying out the task scheduling itself. In reality, the scheduler is just a set of interface functions: when a task must give up the CPU under certain conditions, it calls into a schedule interface function, which completes the process switch.
(figure: task schedule)
Common scheduling points are:
- Round-robin timeslice handling: on each system tick, the timer interrupt calls `sched_process_timer()` to process the tick periodically; when this results in a priority change, `up_reprioritize_rtr()` is called, which triggers a context switch.
- Preemption points: waiting on semaphores, signals, message queues, or environment variables, scheduler configuration calls, task creation and resumption, yield, and so on.
The scheduling points become clear from the code. Take `arm926` as an example: under `arch/arm/src/arm` there are two functions, `up_saveusercontext()` and `up_fullcontextrestore()`, which save and restore the context respectively. Every task switch ultimately goes through these two functions.
Also under `arch/arm/src/arm`, four functions call into these context-switch primitives: `up_block_task()`, `up_unblock_task()`, `up_reprioritize_rtr()`, and `up_release_pending()`. Since `up_release_pending()` is called from `sched_unlock()`, the four upper-level entry points for a context switch are:
up_block_task()
up_unblock_task()
up_reprioritize_rtr()
sched_unlock()
Any call into one of these four functions may cause a task switch; these calls are the scheduling points.
- Interfaces that call `up_block_task()`:
mq_receive() // message receive
mq_timedsend()
mq_send() // message send
mq_timedreceive()
sem_wait() // take (lock) a semaphore
sigsuspend() // suspend on a signal
sigtimedwait() // wait for a signal
- Interfaces that call `up_unblock_task()`:
mq_receive() // message receive
mq_timedsend()
mq_send() // message send
mq_timedreceive()
sem_post() // give (unlock) a semaphore
sig_tcbdispatch() // signal dispatch
sem_waitirq()
mq_waitirq()
sig_timeout()
task_activate() // activates a task; called from task_create()/task_vforkstart()
- Interfaces that call `sched_unlock()`:
mq_receive() // message receive
mq_timedsend()
mq_send() // message send
mq_timedreceive()
mq_notify()
sem_reset()
sig_deliver()
sig_queueaction()
sig_findaction()
kill()
sig_mqnotempty()
sigprocmask()
sigqueue()
sigsuspend()
sigtimedwait()
sig_unmaskpendingsignal()
env_dup() // environment variables
getenv()
setenv()
unsetenv()
group_assigngid() // group related
sched_getaffinity()
sched_getparam()
sched_setaffinity()
sched_setparam()
sched_setscheduler()
waitid()
atexit()
task_signalparent()
on_exit()
posix_spawn()
task_assignpid()
thread_schedsetup()
task_restart()
task_spawn()
task_terminate()
lpwork_boostpriority()
lpwork_restorepriority()
work_lpstart()
- Interfaces that call `up_reprioritize_rtr()`:
sched_roundrobin_process() // called during round-robin scheduling
sched_running_setpriority() // priority change of a running task
sched_readytorun_setpriority() // priority change of a ready-to-run task
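How does sched_unlock() end up switching tasks? A simplified shape of the function (locking and round-robin details omitted; see sched/sched/sched_unlock.c): when the preemption lock count drops to zero and tasks were pended in the meantime, up_release_pending() is called, which may switch context.
/* Simplified shape of sched_unlock(); not the full implementation */
int sched_unlock(void)
{
  FAR struct tcb_s *rtcb = this_task();

  if (rtcb && --rtcb->lockcount <= 0)
    {
      rtcb->lockcount = 0;

      /* Tasks that became ready while preemption was locked sit in
       * g_pendingtasks; releasing them may cause a context switch. */

      if (g_pendingtasks.head)
        {
          up_release_pending();
        }
    }

  return OK;
}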
Context Switching
Taking `arm926` as an example, the low-level context-switch code lives in the `arch/arm/src/arm/` directory, in the two functions `up_saveusercontext()` and `up_fullcontextrestore()`.
`up_saveusercontext()` saves all of the CPU registers into the `struct xcptcontext` embedded in the TCB (`tcb->xcp`), preserving the execution context:
.text
.globl up_saveusercontext
.type up_saveusercontext, function
up_saveusercontext:
/* On entry, a1 (r0) holds address of struct xcptcontext.
* Offset to the user region.
*/
/* Make sure that the return value will be non-zero (the
* value of the other volatile registers don't matter --
* r1-r3, ip). This function is called through the
* normal C calling conventions and the values of these
* registers cannot be assumed at the point of setjmp
* return.
*/
mov ip, #1
str ip, [r0, #(4*REG_R0)]
/* Save the volatile registers (plus r12 which really
* doesn't need to be saved)
*/
add r1, r0, #(4*REG_R4)
stmia r1, {r4-r14}
/* Save the current cpsr */
mrs r2, cpsr /* R2 = CPSR value */
add r1, r0, #(4*REG_CPSR)
str r2, [r1]
/* Finally save the return address as the PC so that we
* return to the exit from this function.
*/
add r1, r0, #(4*REG_PC)
str lr, [r1]
/* Return 0 */
mov r0, #0 /* Return value == 0 */
mov pc, lr /* Return */
.size up_saveusercontext, . - up_saveusercontext
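Note how a 1 is pre-stored into the saved REG_R0 slot while the direct return path returns 0: this gives up_saveusercontext() setjmp-like semantics. The calling pattern, which up_reprioritize_rtr() below uses verbatim, is roughly:
/* Sketch of the setjmp-like calling pattern: returns 0 on the save
 * path, and "returns" non-zero when the task is later resumed via
 * up_fullcontextrestore(). */
if (!up_saveusercontext(rtcb->xcp.regs))
  {
    /* First pass: pick the new head of g_readytorun and jump to it */
    rtcb = this_task();
    up_fullcontextrestore(rtcb->xcp.regs); /* does not return here */
  }

/* Execution resumes here when this task is scheduled in again */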
`up_fullcontextrestore()` restores the register values saved in the TCB's `xcptcontext` back into the CPU registers, reinstating the context:
.globl up_fullcontextrestore
.type up_fullcontextrestore, function
up_fullcontextrestore:
/* On entry, a1 (r0) holds address of the register save area */
/* Recover all registers except for r0, r1, R15, and CPSR */
add r1, r0, #(4*REG_R2) /* Offset to REG_R2 storage */
ldmia r1, {r2-r14} /* Recover registers */
/* Create a stack frame to hold the PC */
sub sp, sp, #4 /* Frame for one register */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
str r1, [sp] /* Save it in the stack */
/* Now we can restore the CPSR. We wait until we are completely
* finished with the context save data to do this. Restoring the CPSR
* may re-enable interrupts and we could be in a context
* where the save structure is only protected by interrupts being
* disabled.
*/
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
msr cpsr, r1 /* Set the CPSR */
/* Now recover r0 and r1
* Then return to the address at the top of the stack,
* destroying the stack frame
*/
ldr r1, [r0, #(4*REG_R1)] /* Restore the r1 register first */
ldr r0, [r0, #(4*REG_R0)]
ldmia sp!, {r15} /* Return pc value */
.size up_fullcontextrestore, . - up_fullcontextrestore
The four upper-level functions mentioned earlier, `up_block_task()`, `up_unblock_task()`, `up_reprioritize_rtr()`, and `up_release_pending()`, are all implemented along similar lines. Take `up_reprioritize_rtr()` as an example:
/****************************************************************************
* Name: up_reprioritize_rtr
*
* Description:
* Called when the priority of a running or
* ready-to-run task changes and the reprioritization will
* cause a context switch. Two cases:
*
* 1) The priority of the currently running task drops and the next
* task in the ready to run list has priority.
* 2) An idle, ready to run task's priority has been raised above the
* the priority of the current, running task and it now has the
* priority.
*
* Inputs:
* tcb: The TCB of the task that has been reprioritized
* priority: The new task priority
*
****************************************************************************/
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
|| priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
|| priority > SCHED_PRIORITY_MAX
#endif
)
{
PANIC();
}
else
{
struct tcb_s *rtcb = this_task(); /* Get the head of the g_readytorun queue */
bool switch_needed;
sinfo("TCB=%p PRI=%d\n", tcb, priority);
/* Remove the tcb task from the ready-to-run list.
* sched_removereadytorun will return true if we just
* remove the head of the ready to run list.
*/
switch_needed = sched_removereadytorun(tcb); /* Remove the tcb from the g_readytorun queue */
/* Setup up the new task priority */
tcb->sched_priority = (uint8_t)priority; /* Set the new priority */
/* Return the task to the specified blocked task list.
* sched_addreadytorun will return true if the task was
* added to the new list. We will need to perform a context
* switch only if the EXCLUSIVE or of the two calls is non-zero
* (i.e., one and only one the calls changes the head of the
* ready-to-run list).
*/
switch_needed ^= sched_addreadytorun(tcb); /* Add it back into the g_readytorun queue */
/* Now, perform the context switch if one is needed */
if (switch_needed)
{
/* If we are going to do a context switch, then now is the right
* time to add any pending tasks back into the ready-to-run list.
* task list now
*/
if (g_pendingtasks.head)
{
sched_mergepending(); /* Merge the pending tasks into the ready-to-run queue before switching */
}
/* Update scheduler parameters */
sched_suspend_scheduler(rtcb);
/* Are we in an interrupt handler? */
if (CURRENT_REGS) /* CURRENT_REGS is non-NULL only while in an interrupt handler; it is set back to NULL when interrupt handling completes */
{
/* Yes, then we have to do things differently.
* Just copy the CURRENT_REGS into the OLD rtcb.
*/
up_savestate(rtcb->xcp.regs); /* Save the interrupt-time registers into the old rtcb */
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
*/
rtcb = this_task(); /* Find the new tcb to switch to */
/* Update scheduler parameters */
sched_resume_scheduler(rtcb); /* Update scheduler parameters */
/* Then switch contexts. Any necessary address environment
* changes will be made when the interrupt returns.
*/
up_restorestate(rtcb->xcp.regs); /* Restore the new tcb's registers (takes effect when the interrupt returns) */
}
/* Copy the exception context into the TCB at the (old) head of the
* ready-to-run Task list. if up_saveusercontext returns a non-zero
* value, then this is really the previously running task restarting!
*/
else if (!up_saveusercontext(rtcb->xcp.regs)) /* Not in an interrupt: switch context directly */
{
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
*/
rtcb = this_task();
#ifdef CONFIG_ARCH_ADDRENV
/* Make sure that the address environment for the previously
* running task is closed down gracefully (data caches dump,
* MMU flushed) and set up the address environment for the new
* thread at the head of the ready-to-run list.
*/
(void)group_addrenv(rtcb);
#endif
/* Update scheduler parameters */
sched_resume_scheduler(rtcb);
/* Then switch contexts */
up_fullcontextrestore(rtcb->xcp.regs); /* After the restore, execution continues in the new task */
}
}
}
Finally, here is a picture of the NuttX scheduler-related source files; once you trace through the flow, you will find that quite a few files are involved.
(figure: task schedule source code)
Addendum
I used to get tangled up in questions like whether a task switch can happen inside an interrupt, and how a task switch can still happen after interrupts have been disabled; it is worth walking through both again.
Task switching inside a timer interrupt, as shown below:
(figure: task switch inside an interrupt)
The steps after the interrupt fires:
- IRQ disable & Mode Switch: on interrupt entry the processor is in IRQ mode; it first switches to SVC mode and disables interrupts.
- IRQ Context Save: the interrupted context is saved; all registers are stored on the interrupt stack, and the save address is passed in `R0` to `up_decodeirq()`, where the global macro `CURRENT_REGS` is pointed at that address.
- up_savestate(): if a task-switch point is reached during IRQ dispatch, for example a call into `up_reprioritize_rtr()`, that function checks whether `CURRENT_REGS` is NULL to determine whether the switch is happening inside an interrupt handler. If it is, `up_savestate()` is called to save the contents of `CURRENT_REGS` into `task A`'s TCB.
- up_restorestate(): the saved context of the incoming `task B` is restored into `CURRENT_REGS`. At this point the save area has been overwritten, but nothing has been written into the CPU registers yet, so the real task switch has not happened.
- IRQ Context Restore: when the interrupt completes, the saved values are loaded back into the registers. Here is the key point: because `up_restorestate()` replaced the saved contents with the new task's context, completing the restore jumps execution into `task B`, finishing the task switch. (A simplified sketch of the two helpers follows this list.)
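In the ARM port the two helpers are thin wrappers around CURRENT_REGS, shown here in simplified form (the real definitions are in arch/arm/src/arm/up_internal.h):
/* Simplified: "saving" copies the interrupt-time register area into
 * the outgoing TCB; "restoring" merely repoints CURRENT_REGS, and the
 * actual register load happens when the interrupt returns. */
#define up_savestate(regs)    up_copyfullstate(regs, (FAR uint32_t *)CURRENT_REGS)
#define up_restorestate(regs) (CURRENT_REGS = (regs))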
Task switching after interrupts have been disabled, as shown below:
(figure: task switch with interrupts disabled)
While reading the source, I noticed a task switch being performed after `enter_critical_section()`. That puzzled me at first: with interrupts disabled, would the new task be unable to respond to interrupts after the switch? I had overlooked a key point: during `TASK_B Context Restore`, the `CPSR` saved in `TASK_B`'s context is restored into the register, so from that point on the CPU is running `TASK_B`'s context, which has nothing to do with `TASK_A`'s.
In the figure above, once `TASK_B Context Restore` completes, `TASK_A` is left suspended at that point. The next time `TASK_A` is scheduled, it continues from there. So even though `enter_critical_section()` disabled interrupts earlier, when `TASK_A` runs again, `leave_critical_section()` re-enables them and `TASK_A` carries on with the rest of its flow.
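Condensed into code, the pattern looks roughly like this; enter_critical_section()/leave_critical_section() are the real NuttX interfaces, while the semaphore and the function here are illustrative stand-ins:
#include <nuttx/irq.h>
#include <semaphore.h>

static sem_t g_sem;   /* Illustrative; assumed initialized elsewhere */

static void task_a_critical_path(void)
{
  irqstate_t flags = enter_critical_section(); /* IRQs off in TASK_A */

  sem_wait(&g_sem); /* Blocks: switch to TASK_B, which runs under its
                     * own restored CPSR (interrupts enabled there) */

  /* When TASK_A is scheduled again, it resumes here, with interrupts
   * still disabled in its own context */

  leave_critical_section(flags); /* Interrupts back on for TASK_A */
}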
I will keep updating this as new questions and insights come up...