
- cotributed by Bjorn Swift - In this first phase, scheduling is moved from the kernel to the PM server. The next steps are to a) moving scheduling to its own server and b) include useful information in the "out of quantum" message, so that the scheduler can make use of this information. - The kernel process table now keeps record of who is responsible for scheduling each process (p_scheduler). When this pointer is NULL, the process will be scheduled by the kernel. If such a process runs out of quantum, the kernel will simply renew its quantum an requeue it. - When PM loads, it will take over scheduling of all running processes, except system processes, using sys_schedctl(). Essentially, this only results in taking over init. As children inherit a scheduler from their parent, user space programs forked by init will inherit PM (for now) as their scheduler. - Once a process has been assigned a scheduler, and runs out of quantum, its RTS_NO_QUANTUM flag will be set and the process dequeued. The kernel will send a message to the scheduler, on the process' behalf, informing the scheduler that it has run out of quantum. The scheduler can take what ever action it pleases, based on its policy, and then reschedule the process using the sys_schedule() system call. - Balance queues does not work as before. While the old in-kernel function used to renew the quantum of processes in the highest priority run queue, the user-space implementation only acts on processes that have been bumped down to a lower priority queue. This approach reacts slower to changes than the old one, but saves us sending a sys_schedule message for each process every time we balance the queues. Currently, when processes are moved up a priority queue, their quantum is also renewed, but this can be fiddled with. - do_nice has been removed from kernel. PM answers to get- and setpriority calls, updates it's own nice variable as well as the max_run_queue. This will be refactored once scheduling is moved to a separate server. We will probably have PM update it's local nice value and then send a message to whoever is scheduling the process. - changes to fix an issue in do_fork() where processes could run out of quantum but bypassing the code path that handles it correctly. The future plan is to remove the policy from do_fork() and implement it in userspace too.
267 lines
9.8 KiB
C
267 lines
9.8 KiB
C
#ifndef PROC_H
|
|
#define PROC_H
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
/* Here is the declaration of the process table. It contains all process
|
|
* data, including registers, flags, scheduling priority, memory map,
|
|
* accounting, message passing (IPC) information, and so on.
|
|
*
|
|
* Many assembly code routines reference fields in it. The offsets to these
|
|
* fields are defined in the assembler include file sconst.h. When changing
|
|
* struct proc, be sure to change sconst.h to match.
|
|
*/
|
|
#include <minix/com.h>
|
|
#include <minix/portio.h>
|
|
#include "const.h"
|
|
#include "priv.h"
|
|
|
|
struct proc {
|
|
struct stackframe_s p_reg; /* process' registers saved in stack frame */
|
|
struct fpu_state_s p_fpu_state; /* process' fpu_regs saved lazily */
|
|
struct segframe p_seg; /* segment descriptors */
|
|
proc_nr_t p_nr; /* number of this process (for fast access) */
|
|
struct priv *p_priv; /* system privileges structure */
|
|
short p_rts_flags; /* process is runnable only if zero */
|
|
short p_misc_flags; /* flags that do not suspend the process */
|
|
|
|
char p_priority; /* current process priority */
|
|
char p_ticks_left; /* number of scheduling ticks left */
|
|
char p_quantum_size; /* quantum size in ticks */
|
|
struct proc *p_scheduler; /* who should get out of quantum msg */
|
|
|
|
struct mem_map p_memmap[NR_LOCAL_SEGS]; /* memory map (T, D, S) */
|
|
struct pagefault p_pagefault; /* valid if PAGEFAULT in p_rts_flags set */
|
|
struct proc *p_nextpagefault; /* next on PAGEFAULT chain */
|
|
|
|
clock_t p_user_time; /* user time in ticks */
|
|
clock_t p_sys_time; /* sys time in ticks */
|
|
|
|
clock_t p_virt_left; /* number of ticks left on virtual timer */
|
|
clock_t p_prof_left; /* number of ticks left on profile timer */
|
|
|
|
u64_t p_cycles; /* how many cycles did the process use */
|
|
|
|
struct proc *p_nextready; /* pointer to next ready process */
|
|
struct proc *p_caller_q; /* head of list of procs wishing to send */
|
|
struct proc *p_q_link; /* link to next proc wishing to send */
|
|
endpoint_t p_getfrom_e; /* from whom does process want to receive? */
|
|
endpoint_t p_sendto_e; /* to whom does process want to send? */
|
|
|
|
sigset_t p_pending; /* bit map for pending kernel signals */
|
|
|
|
char p_name[P_NAME_LEN]; /* name of the process, including \0 */
|
|
|
|
endpoint_t p_endpoint; /* endpoint number, generation-aware */
|
|
|
|
message p_sendmsg; /* Message from this process if SENDING */
|
|
message p_delivermsg; /* Message for this process if MF_DELIVERMSG */
|
|
vir_bytes p_delivermsg_vir; /* Virtual addr this proc wants message at */
|
|
vir_bytes p_delivermsg_lin; /* Linear addr this proc wants message at */
|
|
|
|
/* If handler functions detect a process wants to do something with
|
|
* memory that isn't present, VM has to fix it. Until it has asked
|
|
* what needs to be done and fixed it, save necessary state here.
|
|
*
|
|
* The requestor gets a copy of its request message in reqmsg and gets
|
|
* VMREQUEST set.
|
|
*/
|
|
struct {
|
|
struct proc *nextrestart; /* next in vmrestart chain */
|
|
struct proc *nextrequestor; /* next in vmrequest chain */
|
|
#define VMSTYPE_SYS_NONE 0
|
|
#define VMSTYPE_KERNELCALL 1
|
|
#define VMSTYPE_DELIVERMSG 2
|
|
#define VMSTYPE_MAP 3
|
|
|
|
int type; /* suspended operation */
|
|
union {
|
|
/* VMSTYPE_SYS_MESSAGE */
|
|
message reqmsg; /* suspended request message */
|
|
} saved;
|
|
|
|
/* Parameters of request to VM */
|
|
int req_type;
|
|
endpoint_t target;
|
|
union {
|
|
struct {
|
|
vir_bytes start, length; /* memory range */
|
|
u8_t writeflag; /* nonzero for write access */
|
|
} check;
|
|
struct {
|
|
char writeflag;
|
|
endpoint_t ep_s;
|
|
vir_bytes vir_s, vir_d;
|
|
vir_bytes length;
|
|
} map;
|
|
} params;
|
|
/* VM result when available */
|
|
int vmresult;
|
|
|
|
/* If the suspended operation is a sys_call, its details are
|
|
* stored here.
|
|
*/
|
|
} p_vmrequest;
|
|
|
|
int p_found; /* consistency checking variables */
|
|
#define PMAGIC 0xC0FFEE1
|
|
int p_magic; /* check validity of proc pointers */
|
|
|
|
#if DEBUG_TRACE
|
|
int p_schedules;
|
|
#endif
|
|
};
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
|
|
#define RTS_SLOT_FREE 0x01 /* process slot is free */
|
|
#define RTS_PROC_STOP 0x02 /* process has been stopped */
|
|
#define RTS_SENDING 0x04 /* process blocked trying to send */
|
|
#define RTS_RECEIVING 0x08 /* process blocked trying to receive */
|
|
#define RTS_SIGNALED 0x10 /* set when new kernel signal arrives */
|
|
#define RTS_SIG_PENDING 0x20 /* unready while signal being processed */
|
|
#define RTS_P_STOP 0x40 /* set when process is being traced */
|
|
#define RTS_NO_PRIV 0x80 /* keep forked system process from running */
|
|
#define RTS_NO_ENDPOINT 0x100 /* process cannot send or receive messages */
|
|
#define RTS_VMINHIBIT 0x200 /* not scheduled until pagetable set by VM */
|
|
#define RTS_PAGEFAULT 0x400 /* process has unhandled pagefault */
|
|
#define RTS_VMREQUEST 0x800 /* originator of vm memory request */
|
|
#define RTS_VMREQTARGET 0x1000 /* target of vm memory request */
|
|
#define RTS_SYS_LOCK 0x2000 /* temporary process lock flag for systask */
|
|
#define RTS_PREEMPTED 0x4000 /* this process was preempted by a higher
|
|
priority process and we should pick a new one
|
|
to run. Processes with this flag should be
|
|
returned to the front of their current
|
|
priority queue if they are still runnable
|
|
before we pick a new one
|
|
*/
|
|
#define RTS_NO_QUANTUM 0x8000 /* process ran out of its quantum and we should
|
|
pick a new one. Process was dequeued and
|
|
should be enqueued at the end of some run
|
|
queue again */
|
|
|
|
/* A process is runnable iff p_rts_flags == 0. */
|
|
#define rts_f_is_runnable(flg) ((flg) == 0)
|
|
#define proc_is_runnable(p) (rts_f_is_runnable((p)->p_rts_flags))
|
|
|
|
#define proc_is_preempted(p) ((p)->p_rts_flags & RTS_PREEMPTED)
|
|
#define proc_no_quantum(p) ((p)->p_rts_flags & RTS_NO_QUANTUM)
|
|
#define proc_ptr_ok(p) ((p)->p_magic == PMAGIC)
|
|
|
|
/* Macro to return: on which process is a certain process blocked?
|
|
* return endpoint number (can be ANY) or NONE. It's important to
|
|
* check RTS_SENDING first, and then RTS_RECEIVING, as they could
|
|
* both be on (if a sendrec() blocks on sending), and p_getfrom_e
|
|
* could be nonsense even though RTS_RECEIVING is on.
|
|
*/
|
|
#define P_BLOCKEDON(p) \
|
|
( \
|
|
((p)->p_rts_flags & RTS_SENDING) ? \
|
|
(p)->p_sendto_e : \
|
|
( \
|
|
( \
|
|
((p)->p_rts_flags & RTS_RECEIVING) ? \
|
|
(p)->p_getfrom_e : \
|
|
NONE \
|
|
) \
|
|
) \
|
|
)
|
|
|
|
/* These runtime flags can be tested and manipulated by these macros. */
|
|
|
|
#define RTS_ISSET(rp, f) (((rp)->p_rts_flags & (f)) == (f))
|
|
|
|
|
|
/* Set flag and dequeue if the process was runnable. */
|
|
#define RTS_SET(rp, f) \
|
|
do { \
|
|
const int rts = (rp)->p_rts_flags; \
|
|
(rp)->p_rts_flags |= (f); \
|
|
if(rts_f_is_runnable(rts) && !proc_is_runnable(rp)) { \
|
|
dequeue(rp); \
|
|
} \
|
|
} while(0)
|
|
|
|
/* Clear flag and enqueue if the process was not runnable but is now. */
|
|
#define RTS_UNSET(rp, f) \
|
|
do { \
|
|
int rts; \
|
|
rts = (rp)->p_rts_flags; \
|
|
(rp)->p_rts_flags &= ~(f); \
|
|
if(!rts_f_is_runnable(rts) && proc_is_runnable(rp)) { \
|
|
enqueue(rp); \
|
|
} \
|
|
} while(0)
|
|
|
|
/* Set flags to this value. */
|
|
#define RTS_SETFLAGS(rp, f) \
|
|
do { \
|
|
if(proc_is_runnable(rp) && (f)) { dequeue(rp); } \
|
|
(rp)->p_rts_flags = (f); \
|
|
} while(0)
|
|
|
|
/* Misc flags */
|
|
#define MF_REPLY_PEND 0x001 /* reply to IPC_REQUEST is pending */
|
|
#define MF_VIRT_TIMER 0x002 /* process-virtual timer is running */
|
|
#define MF_PROF_TIMER 0x004 /* process-virtual profile timer is running */
|
|
#define MF_KCALL_RESUME 0x008 /* processing a kernel call was interrupted,
|
|
most likely because we need VM to resolve a
|
|
problem or a long running copy was preempted.
|
|
We need to resume the kernel call execution
|
|
now
|
|
*/
|
|
#define MF_ASYNMSG 0x010 /* Asynchrous message pending */
|
|
#define MF_FULLVM 0x020
|
|
#define MF_DELIVERMSG 0x040 /* Copy message for him before running */
|
|
#define MF_SIG_DELAY 0x080 /* Send signal when no longer sending */
|
|
#define MF_SC_ACTIVE 0x100 /* Syscall tracing: in a system call now */
|
|
#define MF_SC_DEFER 0x200 /* Syscall tracing: deferred system call */
|
|
#define MF_SC_TRACE 0x400 /* Syscall tracing: trigger syscall events */
|
|
#define MF_USED_FPU 0x800 /* process used fpu during last execution run */
|
|
#define MF_FPU_INITIALIZED 0x1000 /* process already used math, so fpu
|
|
* regs are significant (initialized)*/
|
|
|
|
/* Scheduling priorities for p_priority. Values must start at zero (highest
|
|
* priority) and increment. Priorities of the processes in the boot image
|
|
* can be set in table.c.
|
|
*/
|
|
#define NR_SCHED_QUEUES 16 /* MUST equal minimum priority + 1 */
|
|
#define TASK_Q 0 /* highest, used for kernel tasks */
|
|
#define MAX_USER_Q 0 /* highest priority for user processes */
|
|
#define USER_Q (NR_SCHED_QUEUES / 2) /* default (should correspond to
|
|
nice 0) */
|
|
#define MIN_USER_Q (NR_SCHED_QUEUES - 1) /* minimum priority for user
|
|
processes */
|
|
|
|
/* Magic process table addresses. */
|
|
#define BEG_PROC_ADDR (&proc[0])
|
|
#define BEG_USER_ADDR (&proc[NR_TASKS])
|
|
#define END_PROC_ADDR (&proc[NR_TASKS + NR_PROCS])
|
|
|
|
#define cproc_addr(n) (&(proc + NR_TASKS)[(n)])
|
|
#define proc_addr(n) (&(proc[NR_TASKS + (n)]))
|
|
#define proc_nr(p) ((p)->p_nr)
|
|
|
|
#define isokprocn(n) ((unsigned) ((n) + NR_TASKS) < NR_PROCS + NR_TASKS)
|
|
#define isemptyn(n) isemptyp(proc_addr(n))
|
|
#define isemptyp(p) ((p)->p_rts_flags == RTS_SLOT_FREE)
|
|
#define iskernelp(p) ((p) < BEG_USER_ADDR)
|
|
#define iskerneln(n) ((n) < 0)
|
|
#define isuserp(p) isusern((p) >= BEG_USER_ADDR)
|
|
#define isusern(n) ((n) >= 0)
|
|
#define isrootsysn(n) ((n) == ROOT_SYS_PROC_NR)
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
EXTERN struct proc proc[NR_TASKS + NR_PROCS]; /* process table */
|
|
EXTERN struct proc *rdy_head[NR_SCHED_QUEUES]; /* ptrs to ready list headers */
|
|
EXTERN struct proc *rdy_tail[NR_SCHED_QUEUES]; /* ptrs to ready list tails */
|
|
|
|
_PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
|
|
message *m_ptr, int flags));
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif /* PROC_H */
|