My state: trying to get some memory optimisation features working smoothly (less page table reloading, less TLB purging). To be documented when committing to trunk :)
This commit is contained in:
parent 9d56ac3fc9
commit 4dae6c4bbc
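The two mechanisms this commit experiments with are visible in the diff below: the kernel remembers the last page directory it wrote to cr3 (loadedcr3 in the assembly _restart/_write_cr3 paths), so a switch to a process that shares the current address space costs no cr3 reload and therefore no TLB purge, and a dirtypde bitmap in memory.c lets temporary page-directory slots be reused without extra invalidation while they are still clean. What follows is a minimal C sketch of the first idea only; the names proc_sketch, loaded_cr3, write_cr3_register and switch_address_space are illustrative assumptions, not the kernel's real identifiers, and the real code is assembly rather than this function.

#include <stdint.h>

typedef uint32_t u32_t;

/* Illustrative process slot; only the field the sketch needs. */
struct proc_sketch {
	u32_t p_cr3;		/* physical address of page directory, 0 = none */
};

static u32_t loaded_cr3;		/* last value actually written to cr3 */
static struct proc_sketch *pt_owner;	/* process whose page table is active */

/* Assumed architecture hook: actually writes the cr3 register. */
extern void write_cr3_register(u32_t phys);

void switch_address_space(struct proc_sketch *next)
{
	if (next->p_cr3 == 0)
		return;		/* process runs on the current page table */

	if (next->p_cr3 == loaded_cr3)
		return;		/* same page directory: skip reload and TLB purge */

	write_cr3_register(next->p_cr3);	/* expensive: flushes the TLB */
	loaded_cr3 = next->p_cr3;
	pt_owner = next;
}

The point of the comparison is that every write to cr3 invalidates the non-global TLB entries, so skipping the write when the page directory has not changed is what the commit message calls "less TLB purging".
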
@@ -585,6 +585,7 @@
#define SVMCTL_MRG_LEN m1_i1 /* MEMREQ_GET reply: length */
#define SVMCTL_MRG_WRITE m1_i2 /* MEMREQ_GET reply: writeflag */
#define SVMCTL_MRG_EP m1_i3 /* MEMREQ_GET reply: process */
#define SVMCTL_MRG_REQUESTOR m1_p2 /* MEMREQ_GET reply: requestor */

/* Codes and field names for SYS_SYSCTL. */
#define SYSCTL_CODE m1_i1 /* SYSCTL_CODE_* below */
@@ -53,9 +53,8 @@ _PROTOTYPE( int sys_vmctl, (endpoint_t who, int param, u32_t value));
_PROTOTYPE( int sys_vmctl_get_pagefault_i386, (endpoint_t *who, u32_t *cr2, u32_t *err));
_PROTOTYPE( int sys_vmctl_get_cr3_i386, (endpoint_t who, u32_t *cr3) );
_PROTOTYPE( int sys_vmctl_get_memreq, (endpoint_t *who, vir_bytes *mem,
vir_bytes *len, int *wrflag) );

vir_bytes *len, int *wrflag, endpoint_t *) );
_PROTOTYPE( int sys_vmctl_enable_paging, (struct mem_map *));

_PROTOTYPE( int sys_readbios, (phys_bytes address, void *buf, size_t size));
_PROTOTYPE( int sys_stime, (time_t boottime));
@@ -77,7 +77,7 @@ register message *m_ptr; /* pointer to request message */
return EPERM;
}
/* Get and check physical address. */
if ((phys_buf = umap_virtual(proc_addr(proc_nr), D,
if ((phys_buf = umap_local(proc_addr(proc_nr), D,
(vir_bytes) m_ptr->DIO_VEC_ADDR, count)) == 0)
return(EFAULT);
}
@@ -11,53 +11,67 @@
#include <minix/sysutil.h>
#include "../../proc.h"
#include "../../proto.h"
#include "../../vm.h"

extern int vm_copy_in_progress, catch_pagefaults;
extern struct proc *vm_copy_from, *vm_copy_to;
extern u32_t npagefaults;

u32_t pagefault_cr2, pagefault_count = 0;
vir_bytes *old_eip_ptr = NULL, *old_eax_ptr = NULL;
PUBLIC u32_t pagefault_count = 0;

void pagefault(vir_bytes old_eip, struct proc *pr, int trap_errno)
void pagefault(vir_bytes old_eip, struct proc *pr, int trap_errno,
u32_t *old_eipptr, u32_t *old_eaxptr, u32_t pagefaultcr2)
{
int s;
vir_bytes ph;
u32_t pte;
int procok = 0, pcok = 0, rangeok = 0;
int in_memcpy = 0, in_physcopy = 0;
int in_physcopy = 0;
vir_bytes test_eip;

vmassert(old_eip_ptr);
vmassert(old_eax_ptr);
vmassert(old_eipptr);
vmassert(old_eaxptr);

vmassert(*old_eip_ptr == old_eip);
vmassert(old_eip_ptr != &old_eip);
vmassert(*old_eipptr == old_eip);
vmassert(old_eipptr != &old_eip);

vmassert(pagefault_count == 1);

if(catch_pagefaults) {
vir_bytes test_eip;
test_eip = k_reenter ? old_eip : pr->p_reg.pc;
in_memcpy = (test_eip > (vir_bytes) _memcpy_k) &&
(test_eip < (vir_bytes) _memcpy_k_fault);
in_physcopy = (test_eip > (vir_bytes) phys_copy) &&
(test_eip < (vir_bytes) phys_copy_fault);
if((pcok = in_memcpy || in_physcopy)) {
pagefault_count = 0;
#if 0
printf("kernel: pagefault in pr %d, addr 0x%lx, his cr3 0x%lx, actual cr3 0x%lx\n",
pr->p_endpoint, pagefaultcr2, pr->p_seg.p_cr3, read_cr3());
#endif

if(in_memcpy) {
vmassert(!in_physcopy);
*old_eip_ptr = _memcpy_k_fault;
}
if(in_physcopy) {
vmassert(!in_memcpy);
*old_eip_ptr = phys_copy_fault;
}
*old_eax_ptr = pagefault_cr2;

return;
}
if(pr->p_seg.p_cr3) {
#if 0
vm_print(pr->p_seg.p_cr3);
#endif
vmassert(pr->p_seg.p_cr3 == read_cr3());
} else {
vmassert(ptproc);
vmassert(ptproc->p_seg.p_cr3 == read_cr3());
}

test_eip = k_reenter ? old_eip : pr->p_reg.pc;

in_physcopy = (test_eip > (vir_bytes) phys_copy) &&
(test_eip < (vir_bytes) phys_copy_fault);

if((k_reenter || iskernelp(pr)) &&
catch_pagefaults && in_physcopy) {
#if 0
printf("pf caught! addr 0x%lx\n", pagefaultcr2);
#endif
*old_eipptr = phys_copy_fault;
*old_eaxptr = pagefaultcr2;

pagefault_count = 0;

return;
}

npagefaults++;

/* System processes that don't have their own page table can't
* have page faults. VM does have its own page table but also
* can't have page faults (because VM has to handle them).
@@ -67,9 +81,9 @@ void pagefault(vir_bytes old_eip, struct proc *pr, int trap_errno)
/* Page fault we can't / don't want to
* handle.
*/
kprintf("pagefault for process %d ('%s'), pc = 0x%x, addr = 0x%x, flags = 0x%x\n",
kprintf("pagefault for process %d ('%s'), pc = 0x%x, addr = 0x%x, flags = 0x%x, k_reenter %d\n",
pr->p_endpoint, pr->p_name, pr->p_reg.pc,
pagefault_cr2, trap_errno);
pagefaultcr2, trap_errno, k_reenter);
proc_stacktrace(pr);
minix_panic("page fault in system process", pr->p_endpoint);

@@ -86,12 +100,12 @@ void pagefault(vir_bytes old_eip, struct proc *pr, int trap_errno)
* and tell VM there is a pagefault to be
* handled.
*/
pr->p_pagefault.pf_virtual = pagefault_cr2;
pr->p_pagefault.pf_virtual = pagefaultcr2;
pr->p_pagefault.pf_flags = trap_errno;
pr->p_nextpagefault = pagefaults;
pagefaults = pr;

lock_notify(SYSTEM, VM_PROC_NR);
lock_notify(HARDWARE, VM_PROC_NR);

pagefault_count = 0;

@@ -101,12 +115,16 @@ void pagefault(vir_bytes old_eip, struct proc *pr, int trap_errno)
/*===========================================================================*
*				exception			*
*===========================================================================*/
PUBLIC void exception(vec_nr, trap_errno, old_eip, old_cs, old_eflags)
PUBLIC void exception(vec_nr, trap_errno, old_eip, old_cs, old_eflags,
old_eipptr, old_eaxptr, pagefaultcr2)
unsigned vec_nr;
u32_t trap_errno;
u32_t old_eip;
U16_t old_cs;
u32_t old_eflags;
u32_t *old_eipptr;
u32_t *old_eaxptr;
u32_t pagefaultcr2;
{
/* An exception or unexpected interrupt has occurred. */

@@ -141,6 +159,8 @@ struct proc *t;

/* Save proc_ptr, because it may be changed by debug statements. */
saved_proc = proc_ptr;

CHECK_RUNQUEUES;

ep = &ex_data[vec_nr];

@@ -150,8 +170,9 @@ struct proc *t;
}

if(vec_nr == PAGE_FAULT_VECTOR) {
pagefault(old_eip, saved_proc, trap_errno);
return;
pagefault(old_eip, saved_proc, trap_errno,
old_eipptr, old_eaxptr, pagefaultcr2);
return;
}

/* If an exception occurs while running a process, the k_reenter variable
@@ -222,7 +243,7 @@ PUBLIC void proc_stacktrace(struct proc *proc)
break;
}
if(PRCOPY(proc, v_bp + sizeof(v_pc), &v_pc, sizeof(v_pc)) != OK) {
kprintf("(v_pc 0x%lx ?)", v_pc);
kprintf("(v_pc 0x%lx ?)", v_bp + sizeof(v_pc));
break;
}
kprintf("0x%lx ", (unsigned long) v_pc);
@@ -383,8 +383,6 @@ _phys_copy:
mov edi, PC_ARGS+4(esp)
mov eax, PC_ARGS+4+4(esp)

mov (_catch_pagefaults), 1

cmp eax, 10 ! avoid align overhead for small counts
jb pc_small
mov ecx, esi ! align source, hope target is too
@@ -403,12 +401,11 @@ pc_small:
rep
eseg movsb

mov eax, 0 ! 0 means: no fault
_phys_copy_fault: ! kernel can send us here
pop es
pop edi
pop esi
mov eax, 0 ! 0 means: no fault
_phys_copy_fault: ! kernel can send us here
mov (_catch_pagefaults), 0
ret

!*===========================================================================*
@@ -439,7 +436,7 @@ fill_start:
jnz fill_start
! Any remaining bytes?
mov eax, 16(ebp)
! and eax, 3
and eax, 3
remain_fill:
cmp eax, 0
jz fill_done
@ -9,21 +9,21 @@
|
||||
#include <minix/cpufeature.h>
|
||||
#include <string.h>
|
||||
|
||||
#define FREEPDE_SRC 0
|
||||
#define FREEPDE_DST 1
|
||||
#define FREEPDE_MEMSET 2
|
||||
|
||||
#include <sys/vm_i386.h>
|
||||
|
||||
#include <minix/portio.h>
|
||||
|
||||
#include "proto.h"
|
||||
#include "../../proto.h"
|
||||
#include "../../proto.h"
|
||||
#include "../../debug.h"
|
||||
|
||||
PRIVATE int psok = 0;
|
||||
|
||||
extern u32_t createpde, linlincopies, physzero;
|
||||
int verifyrange = 0;
|
||||
|
||||
extern u32_t newpde, overwritepde, linlincopies,
|
||||
physzero, invlpgs, vmcheckranges, straightpdes;
|
||||
|
||||
#define PROCPDEPTR(pr, pi) ((u32_t *) ((u8_t *) vm_pagedirs +\
|
||||
I386_PAGE_SIZE * pr->p_nr + \
|
||||
@ -37,12 +37,15 @@ u8_t *vm_pagedirs = NULL;
|
||||
u32_t i386_invlpg_addr = 0;
|
||||
|
||||
#define WANT_FREEPDES 4
|
||||
PRIVATE int nfreepdes = 0, freepdes[WANT_FREEPDES];
|
||||
#define NOPDE -1
|
||||
#define PDEMASK(n) (1L << (n))
|
||||
PRIVATE int nfreepdes = 0, freepdes[WANT_FREEPDES], inusepde = NOPDE;
|
||||
PUBLIC u32_t dirtypde;
|
||||
|
||||
#define HASPT(procptr) ((procptr)->p_seg.p_cr3 != 0)
|
||||
|
||||
FORWARD _PROTOTYPE( u32_t phys_get32, (vir_bytes v) );
|
||||
FORWARD _PROTOTYPE( void vm_set_cr3, (u32_t value) );
|
||||
FORWARD _PROTOTYPE( void vm_set_cr3, (struct proc *pr) );
|
||||
FORWARD _PROTOTYPE( void set_cr3, (void) );
|
||||
FORWARD _PROTOTYPE( void vm_enable_paging, (void) );
|
||||
|
||||
@ -50,34 +53,13 @@ FORWARD _PROTOTYPE( void vm_enable_paging, (void) );
|
||||
|
||||
PUBLIC void vm_init(struct proc *newptproc)
|
||||
{
|
||||
u32_t newcr3;
|
||||
|
||||
int i;
|
||||
if(vm_running)
|
||||
minix_panic("vm_init: vm_running", NO_NUM);
|
||||
|
||||
ptproc = newptproc;
|
||||
newcr3 = ptproc->p_seg.p_cr3;
|
||||
kprintf("vm_init: ptproc: %s / %d, cr3 0x%lx\n",
|
||||
ptproc->p_name, ptproc->p_endpoint,
|
||||
ptproc->p_seg.p_cr3);
|
||||
vmassert(newcr3);
|
||||
|
||||
/* Set this cr3 now (not active until paging enabled). */
|
||||
vm_set_cr3(newcr3);
|
||||
|
||||
kprintf("vm_init: writing cr3 0x%lx done; cr3: 0x%lx\n",
|
||||
newcr3, read_cr3());
|
||||
|
||||
kprintf("vm_init: enabling\n");
|
||||
/* Actually enable paging (activating cr3 load above). */
|
||||
vm_set_cr3(newptproc);
|
||||
level0(vm_enable_paging);
|
||||
|
||||
kprintf("vm_init: enabled\n");
|
||||
|
||||
/* Don't do this init in the future. */
|
||||
vm_running = 1;
|
||||
|
||||
kprintf("vm_init done\n");
|
||||
}
|
||||
|
||||
PRIVATE u32_t phys_get32(addr)
|
||||
@ -91,9 +73,8 @@ phys_bytes addr;
|
||||
return v;
|
||||
}
|
||||
|
||||
if((r=lin_lin_copy(NULL, addr, NULL, D,
|
||||
proc_addr(SYSTEM), &v, &v, D,
|
||||
sizeof(v))) != OK) {
|
||||
if((r=lin_lin_copy(NULL, addr,
|
||||
proc_addr(SYSTEM), vir2phys(&v), sizeof(v))) != OK) {
|
||||
minix_panic("lin_lin_copy for phys_get32 failed", r);
|
||||
}
|
||||
|
||||
@ -102,11 +83,16 @@ phys_bytes addr;
|
||||
|
||||
PRIVATE u32_t vm_cr3; /* temp arg to level0() func */
|
||||
|
||||
PRIVATE void vm_set_cr3(value)
|
||||
u32_t value;
|
||||
PRIVATE void vm_set_cr3(struct proc *newptproc)
|
||||
{
|
||||
vm_cr3= value;
|
||||
level0(set_cr3);
|
||||
int u = 0;
|
||||
if(!intr_disabled()) { lock; u = 1; }
|
||||
vm_cr3= newptproc->p_seg.p_cr3;
|
||||
if(vm_cr3) {
|
||||
level0(set_cr3);
|
||||
ptproc = newptproc;
|
||||
}
|
||||
if(u) { unlock; }
|
||||
}
|
||||
|
||||
PRIVATE void set_cr3()
|
||||
@ -343,11 +329,13 @@ PUBLIC int vm_lookup(struct proc *proc, vir_bytes virtual, vir_bytes *physical,
|
||||
pde_v = phys_get32((u32_t) (root + pde));
|
||||
|
||||
if(!(pde_v & I386_VM_PRESENT)) {
|
||||
#if 0
|
||||
#if 1
|
||||
if(verifyrange) {
|
||||
kprintf("vm_lookup: %d:%s:0x%lx: cr3 0x%lx: pde %d not present\n",
|
||||
proc->p_endpoint, proc->p_name, virtual, root, pde);
|
||||
kprintf("kernel stack: ");
|
||||
util_stacktrace();
|
||||
}
|
||||
#endif
|
||||
NOREC_RETURN(vmlookup, EFAULT);
|
||||
}
|
||||
@ -365,6 +353,14 @@ PUBLIC int vm_lookup(struct proc *proc, vir_bytes virtual, vir_bytes *physical,
|
||||
vmassert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
|
||||
pte_v = phys_get32((u32_t) (pt + pte));
|
||||
if(!(pte_v & I386_VM_PRESENT)) {
|
||||
#if 1
|
||||
if(verifyrange) {
|
||||
kprintf("vm_lookup: %d:%s:0x%lx: cr3 0x%lx: pte %d not present\n",
|
||||
proc->p_endpoint, proc->p_name, virtual, root, pte);
|
||||
kprintf("kernel stack: ");
|
||||
util_stacktrace();
|
||||
}
|
||||
#endif
|
||||
NOREC_RETURN(vmlookup, EFAULT);
|
||||
}
|
||||
|
||||
@ -449,35 +445,90 @@ PUBLIC int vm_contiguous(struct proc *targetproc, u32_t vir_buf, size_t bytes)
|
||||
boundaries++;
|
||||
}
|
||||
|
||||
if(verbose_vm)
|
||||
kprintf("vm_contiguous: yes (%d boundaries tested)\n",
|
||||
boundaries);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int vm_checkrange_verbose = 0;
|
||||
|
||||
extern u32_t vmreqs;
|
||||
|
||||
/*===========================================================================*
|
||||
* vm_suspend *
|
||||
*===========================================================================*/
|
||||
PUBLIC int vm_suspend(struct proc *caller, struct proc *target)
|
||||
PUBLIC int vm_suspend(struct proc *caller, struct proc *target,
|
||||
vir_bytes linaddr, vir_bytes len, int wrflag, int type)
|
||||
{
|
||||
/* This range is not OK for this process. Set parameters
|
||||
* of the request and notify VM about the pending request.
|
||||
*/
|
||||
if(RTS_ISSET(caller, VMREQUEST))
|
||||
minix_panic("VMREQUEST already set", caller->p_endpoint);
|
||||
RTS_LOCK_SET(caller, VMREQUEST);
|
||||
vmassert(!RTS_ISSET(caller, VMREQUEST));
|
||||
vmassert(!RTS_ISSET(caller, VMREQTARGET));
|
||||
vmassert(!RTS_ISSET(target, VMREQUEST));
|
||||
vmassert(!RTS_ISSET(target, VMREQTARGET));
|
||||
|
||||
RTS_LOCK_SET(caller, VMREQUEST);
|
||||
RTS_LOCK_SET(target, VMREQTARGET);
|
||||
|
||||
#if DEBUG_VMASSERT
|
||||
caller->p_vmrequest.stacktrace[0] = '\0';
|
||||
util_stacktrace_strcat(caller->p_vmrequest.stacktrace);
|
||||
#endif
|
||||
|
||||
vmreqs++;
|
||||
|
||||
/* Set caller in target. */
|
||||
target->p_vmrequest.requestor = caller;
|
||||
caller->p_vmrequest.writeflag = 1;
|
||||
caller->p_vmrequest.start = linaddr;
|
||||
caller->p_vmrequest.length = len;
|
||||
caller->p_vmrequest.who = target->p_endpoint;
|
||||
caller->p_vmrequest.type = type;
|
||||
|
||||
/* Connect caller on vmrequest wait queue. */
|
||||
caller->p_vmrequest.nextrequestor = vmrequest;
|
||||
vmrequest = caller;
|
||||
if(!caller->p_vmrequest.nextrequestor)
|
||||
if(!(caller->p_vmrequest.nextrequestor = vmrequest))
|
||||
lock_notify(SYSTEM, VM_PROC_NR);
|
||||
vmrequest = caller;
|
||||
}
|
||||
|
||||
/*===========================================================================*
|
||||
* delivermsg *
|
||||
*===========================================================================*/
|
||||
int delivermsg(struct proc *rp)
|
||||
{
|
||||
phys_bytes addr;
|
||||
int r;
|
||||
NOREC_ENTER(deliver);
|
||||
|
||||
vmassert(rp->p_misc_flags & MF_DELIVERMSG);
|
||||
vmassert(rp->p_delivermsg.m_source != NONE);
|
||||
|
||||
vmassert(rp->p_delivermsg_lin);
|
||||
vmassert(rp->p_delivermsg_lin ==
|
||||
umap_local(rp, D, rp->p_delivermsg_vir, sizeof(message)));
|
||||
|
||||
vm_set_cr3(rp);
|
||||
|
||||
vmassert(intr_disabled());
|
||||
vmassert(!catch_pagefaults);
|
||||
catch_pagefaults = 1;
|
||||
addr = phys_copy(vir2phys(&rp->p_delivermsg),
|
||||
rp->p_delivermsg_lin, sizeof(message));
|
||||
vmassert(catch_pagefaults);
|
||||
catch_pagefaults = 0;
|
||||
|
||||
if(addr) {
|
||||
printf("phys_copy failed - addr 0x%lx\n", addr);
|
||||
vm_suspend(rp, rp, rp->p_delivermsg_lin, sizeof(message), 1,
|
||||
VMSTYPE_DELIVERMSG);
|
||||
r = VMSUSPEND;
|
||||
} else {
|
||||
#if DEBUG_VMASSERT
|
||||
rp->p_delivermsg.m_source = NONE;
|
||||
rp->p_delivermsg_lin = 0;
|
||||
#endif
|
||||
rp->p_misc_flags &= ~MF_DELIVERMSG;
|
||||
r = OK;
|
||||
}
|
||||
|
||||
NOREC_RETURN(deliver, r);
|
||||
}
|
||||
|
||||
/*===========================================================================*
|
||||
@ -491,22 +542,23 @@ PUBLIC int vm_checkrange(struct proc *caller, struct proc *target,
|
||||
|
||||
NOREC_ENTER(vmcheckrange);
|
||||
|
||||
vmcheckranges++;
|
||||
|
||||
if(!HASPT(target))
|
||||
NOREC_RETURN(vmcheckrange, OK);
|
||||
|
||||
/* If caller has had a reply to this request, return it. */
|
||||
if(RTS_ISSET(caller, VMREQUEST)) {
|
||||
if(!verifyrange && RTS_ISSET(caller, VMREQUEST)) {
|
||||
if(caller->p_vmrequest.who == target->p_endpoint) {
|
||||
if(caller->p_vmrequest.vmresult == VMSUSPEND)
|
||||
minix_panic("check sees VMSUSPEND?", NO_NUM);
|
||||
vmassert(caller->p_vmrequest.vmresult != VMSUSPEND);
|
||||
RTS_LOCK_UNSET(caller, VMREQUEST);
|
||||
#if 0
|
||||
#if 1
|
||||
kprintf("SYSTEM: vm_checkrange: returning vmresult %d\n",
|
||||
caller->p_vmrequest.vmresult);
|
||||
#endif
|
||||
NOREC_RETURN(vmcheckrange, caller->p_vmrequest.vmresult);
|
||||
} else {
|
||||
#if 0
|
||||
#if 1
|
||||
kprintf("SYSTEM: vm_checkrange: caller has a request for %d, "
|
||||
"but our target is %d\n",
|
||||
caller->p_vmrequest.who, target->p_endpoint);
|
||||
@ -525,25 +577,29 @@ PUBLIC int vm_checkrange(struct proc *caller, struct proc *target,
|
||||
|
||||
for(v = vir; v < vir + bytes; v+= I386_PAGE_SIZE) {
|
||||
u32_t phys;
|
||||
int r;
|
||||
|
||||
/* If page exists and it's writable if desired, we're OK
|
||||
* for this page.
|
||||
*/
|
||||
if(vm_lookup(target, v, &phys, &flags) == OK &&
|
||||
if((r=vm_lookup(target, v, &phys, &flags)) == OK &&
|
||||
!(wrfl && !(flags & I386_VM_WRITE))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if(!checkonly) {
|
||||
/* Set parameters in caller. */
|
||||
vm_suspend(caller, target);
|
||||
caller->p_vmrequest.writeflag = wrfl;
|
||||
caller->p_vmrequest.start = vir;
|
||||
caller->p_vmrequest.length = bytes;
|
||||
caller->p_vmrequest.who = target->p_endpoint;
|
||||
if(verifyrange) {
|
||||
int wrok;
|
||||
wrok = !(wrfl && !(flags & I386_VM_WRITE));
|
||||
printf("checkrange failed; lookup: %d; write ok: %d\n",
|
||||
r, wrok);
|
||||
}
|
||||
|
||||
if(!checkonly) {
|
||||
vmassert(k_reenter == -1);
|
||||
vm_suspend(caller, target, vir, bytes, wrfl,
|
||||
VMSTYPE_KERNELCALL);
|
||||
}
|
||||
|
||||
/* SYSTEM loop will fill in VMSTYPE_SYS_MESSAGE. */
|
||||
NOREC_RETURN(vmcheckrange, VMSUSPEND);
|
||||
}
|
||||
|
||||
@ -636,9 +692,17 @@ void invlpg_range(u32_t lin, u32_t bytes)
|
||||
o = lin % I386_PAGE_SIZE;
|
||||
lin -= o;
|
||||
limit = (limit + o) & I386_VM_ADDR_MASK;
|
||||
#if 0
|
||||
for(i386_invlpg_addr = lin; i386_invlpg_addr <= limit;
|
||||
i386_invlpg_addr += I386_PAGE_SIZE)
|
||||
i386_invlpg_addr += I386_PAGE_SIZE) {
|
||||
invlpgs++;
|
||||
level0(i386_invlpg_level0);
|
||||
}
|
||||
#else
|
||||
vm_cr3= ptproc->p_seg.p_cr3;
|
||||
vmassert(vm_cr3);
|
||||
level0(set_cr3);
|
||||
#endif
|
||||
}
|
||||
|
||||
u32_t thecr3;
|
||||
@ -662,79 +726,77 @@ u32_t read_cr3(void)
|
||||
* address space), SEG (hardware segment), VIRT (in-datasegment
|
||||
* address if known).
|
||||
*/
|
||||
#define CREATEPDE(PROC, PTR, LINADDR, OFFSET, FREEPDE, VIRT, SEG, REMAIN, BYTES) { \
|
||||
#define CREATEPDE(PROC, PTR, LINADDR, REMAIN, BYTES) { \
|
||||
int proc_pde_index; \
|
||||
FIXME("CREATEPDE: check if invlpg is necessary"); \
|
||||
if(PROC == ptproc) { \
|
||||
FIXME("CREATEPDE: use in-memory process"); \
|
||||
} \
|
||||
if((PROC) && iskernelp(PROC) && SEG == D) { \
|
||||
PTR = VIRT; \
|
||||
OFFSET = 0; \
|
||||
proc_pde_index = I386_VM_PDE(LINADDR); \
|
||||
if((PROC) && (((PROC) == ptproc) || iskernelp(PROC))) { \
|
||||
PTR = LINADDR; \
|
||||
straightpdes++; \
|
||||
} else { \
|
||||
u32_t pdeval, *pdevalptr, newlin; \
|
||||
int pde_index; \
|
||||
int use_pde = NOPDE; \
|
||||
int fp; \
|
||||
int mustinvl; \
|
||||
u32_t pdeval, *pdevalptr, mask; \
|
||||
phys_bytes offset; \
|
||||
vmassert(psok); \
|
||||
pde_index = I386_VM_PDE(LINADDR); \
|
||||
vmassert(!iskernelp(PROC)); \
|
||||
createpde++; \
|
||||
if(PROC) { \
|
||||
u32_t *pdeptr; \
|
||||
u32_t *pdeptr; \
|
||||
vmassert(!iskernelp(PROC)); \
|
||||
vmassert(HASPT(PROC)); \
|
||||
pdeptr = PROCPDEPTR(PROC, pde_index); \
|
||||
pdeval = *pdeptr; \
|
||||
} else { \
|
||||
pdeptr = PROCPDEPTR(PROC, proc_pde_index); \
|
||||
pdeval = *pdeptr; \
|
||||
} else { \
|
||||
vmassert(!iskernelp(PROC)); \
|
||||
pdeval = (LINADDR & I386_VM_ADDR_MASK_4MB) | \
|
||||
I386_VM_BIGPAGE | I386_VM_PRESENT | \
|
||||
I386_VM_WRITE | I386_VM_USER; \
|
||||
} \
|
||||
*PROCPDEPTR(ptproc, FREEPDE) = pdeval; \
|
||||
newlin = I386_BIG_PAGE_SIZE*FREEPDE; \
|
||||
PTR = (u8_t *) phys2vir(newlin); \
|
||||
OFFSET = LINADDR & I386_VM_OFFSET_MASK_4MB; \
|
||||
REMAIN = MIN(REMAIN, I386_BIG_PAGE_SIZE - OFFSET); \
|
||||
invlpg_range(newlin + OFFSET, REMAIN); \
|
||||
for(fp = 0; fp < nfreepdes; fp++) { \
|
||||
int k = freepdes[fp]; \
|
||||
if(inusepde == k) \
|
||||
continue; \
|
||||
use_pde = k; \
|
||||
mask = PDEMASK(k); \
|
||||
vmassert(mask); \
|
||||
if(dirtypde & mask) \
|
||||
continue; \
|
||||
break; \
|
||||
} \
|
||||
vmassert(use_pde != NOPDE); \
|
||||
vmassert(mask); \
|
||||
if(dirtypde & mask) { \
|
||||
mustinvl = 1; \
|
||||
overwritepde++; \
|
||||
} else { \
|
||||
mustinvl = 0; \
|
||||
dirtypde |= mask; \
|
||||
newpde++; \
|
||||
} \
|
||||
inusepde = use_pde; \
|
||||
*PROCPDEPTR(ptproc, use_pde) = pdeval; \
|
||||
offset = LINADDR & I386_VM_OFFSET_MASK_4MB; \
|
||||
PTR = I386_BIG_PAGE_SIZE*use_pde + offset; \
|
||||
REMAIN = MIN(REMAIN, I386_BIG_PAGE_SIZE - offset); \
|
||||
if(1 || mustinvl) { \
|
||||
invlpg_range(PTR, REMAIN); \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
|
||||
/*===========================================================================*
|
||||
* arch_switch_copymsg *
|
||||
*===========================================================================*/
|
||||
phys_bytes arch_switch_copymsg(struct proc *rp, message *m, phys_bytes lin)
|
||||
{
|
||||
phys_bytes r;
|
||||
int u = 0;
|
||||
if(!intr_disabled()) { lock; u = 1; }
|
||||
if(rp->p_seg.p_cr3 && ptproc != rp) {
|
||||
vm_set_cr3(rp->p_seg.p_cr3);
|
||||
ptproc = rp;
|
||||
}
|
||||
r = phys_copy(vir2phys(m), lin, sizeof(message));
|
||||
if(u) { unlock; }
|
||||
}
|
||||
|
||||
/*===========================================================================*
|
||||
* lin_lin_copy *
|
||||
*===========================================================================*/
|
||||
int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr, u8_t *vsrc,
|
||||
int srcseg,
|
||||
struct proc *dstproc, vir_bytes dstlinaddr, u8_t *vdst,
|
||||
int dstseg,
|
||||
vir_bytes bytes)
|
||||
int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
|
||||
struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
|
||||
{
|
||||
u32_t addr;
|
||||
int procslot;
|
||||
u32_t catchrange_dst, catchrange_lo, catchrange_hi;
|
||||
NOREC_ENTER(linlincopy);
|
||||
|
||||
linlincopies++;
|
||||
|
||||
if(srcproc && dstproc && iskernelp(srcproc) && iskernelp(dstproc)) {
|
||||
memcpy(vdst, vsrc, bytes);
|
||||
NOREC_RETURN(linlincopy, OK);
|
||||
}
|
||||
|
||||
FIXME("lin_lin_copy requires big pages");
|
||||
vmassert(vm_running);
|
||||
vmassert(!catch_pagefaults);
|
||||
@ -747,37 +809,30 @@ int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr, u8_t *vsrc,
|
||||
procslot = ptproc->p_nr;
|
||||
|
||||
vmassert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);
|
||||
vmassert(freepdes[FREEPDE_SRC] < freepdes[FREEPDE_DST]);
|
||||
|
||||
catchrange_lo = I386_BIG_PAGE_SIZE*freepdes[FREEPDE_SRC];
|
||||
catchrange_dst = I386_BIG_PAGE_SIZE*freepdes[FREEPDE_DST];
|
||||
catchrange_hi = I386_BIG_PAGE_SIZE*(freepdes[FREEPDE_DST]+1);
|
||||
|
||||
while(bytes > 0) {
|
||||
u8_t *srcptr, *dstptr;
|
||||
vir_bytes srcoffset, dstoffset;
|
||||
phys_bytes srcptr, dstptr;
|
||||
vir_bytes chunk = bytes;
|
||||
|
||||
/* Set up 4MB ranges. */
|
||||
CREATEPDE(srcproc, srcptr, srclinaddr, srcoffset,
|
||||
freepdes[FREEPDE_SRC], vsrc, srcseg, chunk, bytes);
|
||||
CREATEPDE(dstproc, dstptr, dstlinaddr, dstoffset,
|
||||
freepdes[FREEPDE_DST], vdst, dstseg, chunk, bytes);
|
||||
inusepde = NOPDE;
|
||||
CREATEPDE(srcproc, srcptr, srclinaddr, chunk, bytes);
|
||||
CREATEPDE(dstproc, dstptr, dstlinaddr, chunk, bytes);
|
||||
|
||||
/* Copy pages. */
|
||||
vmassert(intr_disabled());
|
||||
vmassert(!catch_pagefaults);
|
||||
catch_pagefaults = 1;
|
||||
addr=_memcpy_k(dstptr + dstoffset, srcptr + srcoffset, chunk);
|
||||
addr=phys_copy(srcptr, dstptr, chunk);
|
||||
vmassert(intr_disabled());
|
||||
vmassert(catch_pagefaults);
|
||||
catch_pagefaults = 0;
|
||||
|
||||
if(addr) {
|
||||
if(addr >= catchrange_lo && addr < catchrange_dst) {
|
||||
if(addr >= srcptr && addr < (srcptr + chunk)) {
|
||||
NOREC_RETURN(linlincopy, EFAULT_SRC);
|
||||
}
|
||||
if(addr >= catchrange_dst && addr < catchrange_hi) {
|
||||
if(addr >= dstptr && addr < (dstptr + chunk)) {
|
||||
NOREC_RETURN(linlincopy, EFAULT_DST);
|
||||
}
|
||||
minix_panic("lin_lin_copy fault out of range", NO_NUM);
|
||||
@ -785,15 +840,11 @@ int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr, u8_t *vsrc,
|
||||
/* Not reached. */
|
||||
NOREC_RETURN(linlincopy, EFAULT);
|
||||
}
|
||||
|
||||
vmassert(memcmp(dstptr + dstoffset, srcptr + srcoffset, chunk) == 0);
|
||||
|
||||
/* Update counter and addresses for next iteration, if any. */
|
||||
bytes -= chunk;
|
||||
srclinaddr += chunk;
|
||||
dstlinaddr += chunk;
|
||||
vsrc += chunk;
|
||||
vdst += chunk;
|
||||
}
|
||||
|
||||
NOREC_RETURN(linlincopy, OK);
|
||||
@ -805,12 +856,12 @@ int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr, u8_t *vsrc,
|
||||
int vm_phys_memset(phys_bytes ph, u8_t c, phys_bytes bytes)
|
||||
{
|
||||
char *v;
|
||||
u32_t p;
|
||||
p = c | (c << 8) | (c << 16) | (c << 24);
|
||||
|
||||
physzero++;
|
||||
|
||||
if(!vm_running) {
|
||||
u32_t p;
|
||||
p = c | (c << 8) | (c << 16) | (c << 24);
|
||||
phys_memset(ph, p, bytes);
|
||||
return OK;
|
||||
}
|
||||
@ -822,14 +873,13 @@ int vm_phys_memset(phys_bytes ph, u8_t c, phys_bytes bytes)
|
||||
*/
|
||||
while(bytes > 0) {
|
||||
vir_bytes chunk = bytes;
|
||||
u8_t *ptr;
|
||||
u32_t offset;
|
||||
CREATEPDE(((struct proc *) NULL), ptr, ph,
|
||||
offset, freepdes[FREEPDE_MEMSET], 0, 0, chunk, bytes);
|
||||
phys_bytes ptr;
|
||||
inusepde = NOPDE;
|
||||
CREATEPDE(((struct proc *) NULL), ptr, ph, chunk, bytes);
|
||||
/* We can memset as many bytes as we have remaining,
|
||||
* or as many as remain in the 4MB chunk we mapped in.
|
||||
*/
|
||||
memset(ptr + offset, c, chunk);
|
||||
phys_memset(ptr, p, chunk);
|
||||
bytes -= chunk;
|
||||
ph += chunk;
|
||||
}
|
||||
@ -930,30 +980,43 @@ int vmcheck; /* if nonzero, can return VMSUSPEND */
|
||||
|
||||
if(vm_running) {
|
||||
int r;
|
||||
struct proc *target, *caller;
|
||||
struct proc *caller;
|
||||
|
||||
caller = proc_addr(who_p);
|
||||
|
||||
if(RTS_ISSET(caller, VMREQUEST)) {
|
||||
struct proc *target;
|
||||
int pn;
|
||||
vmassert(caller->p_vmrequest.vmresult != VMSUSPEND);
|
||||
RTS_LOCK_UNSET(caller, VMREQUEST);
|
||||
if(caller->p_vmrequest.vmresult != OK) {
|
||||
printf("virtual_copy: returning VM error %d\n",
|
||||
caller->p_vmrequest.vmresult);
|
||||
NOREC_RETURN(virtualcopy, caller->p_vmrequest.vmresult);
|
||||
}
|
||||
}
|
||||
|
||||
if((r=lin_lin_copy(procs[_SRC_], phys_addr[_SRC_],
|
||||
(u8_t *) src_addr->offset, src_addr->segment,
|
||||
procs[_DST_], phys_addr[_DST_], (u8_t *) dst_addr->offset,
|
||||
dst_addr->segment, bytes)) != OK) {
|
||||
procs[_DST_], phys_addr[_DST_], bytes)) != OK) {
|
||||
struct proc *target;
|
||||
int wr;
|
||||
phys_bytes lin;
|
||||
if(r != EFAULT_SRC && r != EFAULT_DST)
|
||||
minix_panic("lin_lin_copy failed", r);
|
||||
if(!vmcheck) {
|
||||
NOREC_RETURN(virtualcopy, r);
|
||||
}
|
||||
|
||||
caller = proc_addr(who_p);
|
||||
|
||||
vmassert(procs[_SRC_] && procs[_DST_]);
|
||||
|
||||
if(r == EFAULT_SRC) {
|
||||
caller->p_vmrequest.start = phys_addr[_SRC_];
|
||||
lin = phys_addr[_SRC_];
|
||||
target = procs[_SRC_];
|
||||
caller->p_vmrequest.writeflag = 0;
|
||||
wr = 0;
|
||||
} else if(r == EFAULT_DST) {
|
||||
caller->p_vmrequest.start = phys_addr[_DST_];
|
||||
lin = phys_addr[_DST_];
|
||||
target = procs[_DST_];
|
||||
caller->p_vmrequest.writeflag = 1;
|
||||
wr = 1;
|
||||
} else {
|
||||
minix_panic("r strange", r);
|
||||
}
|
||||
@ -964,10 +1027,9 @@ int vmcheck; /* if nonzero, can return VMSUSPEND */
|
||||
target->p_endpoint, target->p_name);
|
||||
#endif
|
||||
|
||||
caller->p_vmrequest.length = bytes;
|
||||
caller->p_vmrequest.who = target->p_endpoint;
|
||||
|
||||
vm_suspend(caller, target);
|
||||
vmassert(k_reenter == -1);
|
||||
vmassert(proc_ptr->p_endpoint == SYSTEM);
|
||||
vm_suspend(caller, target, lin, bytes, wr, VMSTYPE_KERNELCALL);
|
||||
|
||||
NOREC_RETURN(virtualcopy, VMSUSPEND);
|
||||
}
|
||||
@ -990,7 +1052,8 @@ int vmcheck; /* if nonzero, can return VMSUSPEND */
|
||||
}
|
||||
|
||||
/* Now copy bytes between physical addresseses. */
|
||||
phys_copy(phys_addr[_SRC_], phys_addr[_DST_], (phys_bytes) bytes);
|
||||
if(phys_copy(phys_addr[_SRC_], phys_addr[_DST_], (phys_bytes) bytes))
|
||||
NOREC_RETURN(virtualcopy, EFAULT);
|
||||
|
||||
NOREC_RETURN(virtualcopy, OK);
|
||||
}
|
||||
|
@ -73,10 +73,7 @@ begbss:
|
||||
|
||||
.define _restart
|
||||
.define save
|
||||
.define _pagefault_cr2
|
||||
.define _pagefault_count
|
||||
.define _old_eip_ptr
|
||||
.define _old_eax_ptr
|
||||
.define _cr3_test
|
||||
.define _cr3_reload
|
||||
.define _write_cr3 ! write cr3
|
||||
@ -104,6 +101,8 @@ begbss:
|
||||
.define _params_size
|
||||
.define _params_offset
|
||||
.define _mon_ds
|
||||
.define _schedcheck
|
||||
.define _dirtypde
|
||||
|
||||
.define _hwint00 ! handlers for hardware interrupts
|
||||
.define _hwint01
|
||||
@ -395,7 +394,7 @@ _p_s_call:
|
||||
|
||||
call _sys_call ! sys_call(call_nr, src_dst, m_ptr, bit_map)
|
||||
! caller is now explicitly in proc_ptr
|
||||
mov AXREG(esi), eax ! sys_call MUST PRESERVE si
|
||||
mov AXREG(esi), eax
|
||||
|
||||
! Fall into code to restart proc/task running.
|
||||
|
||||
@ -406,25 +405,22 @@ _restart:
|
||||
|
||||
! Restart the current process or the next process if it is set.
|
||||
|
||||
cmp (_next_ptr), 0 ! see if another process is scheduled
|
||||
jz 0f
|
||||
mov eax, (_next_ptr)
|
||||
mov (_proc_ptr), eax ! schedule new process
|
||||
mov (_next_ptr), 0
|
||||
0: mov esp, (_proc_ptr) ! will assume P_STACKBASE == 0
|
||||
call _schedcheck ! ask C function who we're running
|
||||
mov esp, (_proc_ptr) ! will assume P_STACKBASE == 0
|
||||
lldt P_LDT_SEL(esp) ! enable process' segment descriptors
|
||||
inc (_cr3_test)
|
||||
cmp P_CR3(esp), 0 ! process does not have its own PT
|
||||
jz noload
|
||||
jz 0f
|
||||
mov eax, P_CR3(esp)
|
||||
cmp eax, (loadedcr3)
|
||||
jz noload
|
||||
jz 0f
|
||||
inc (_cr3_reload)
|
||||
mov cr3, eax
|
||||
mov (loadedcr3), eax
|
||||
mov eax, (_proc_ptr)
|
||||
mov (_ptproc), eax
|
||||
noload:
|
||||
! mov (_dirtypde), 0
|
||||
0:
|
||||
lea eax, P_STACKTOP(esp) ! arrange for next interrupt
|
||||
mov (_tss+TSS3_S_SP0), eax ! to save state in process table
|
||||
restart1:
|
||||
@ -500,7 +496,7 @@ _page_fault:
|
||||
push PAGE_FAULT_VECTOR
|
||||
push eax
|
||||
mov eax, cr2
|
||||
sseg mov (_pagefault_cr2), eax
|
||||
sseg mov (pagefaultcr2), eax
|
||||
sseg inc (_pagefault_count)
|
||||
pop eax
|
||||
jmp errexception
|
||||
@ -530,8 +526,8 @@ errexception:
|
||||
sseg pop (ex_number)
|
||||
sseg pop (trap_errno)
|
||||
exception1: ! Common for all exceptions.
|
||||
sseg mov (_old_eax_ptr), esp ! where will eax be saved?
|
||||
sseg sub (_old_eax_ptr), PCREG-AXREG ! here
|
||||
sseg mov (old_eax_ptr), esp ! where will eax be saved?
|
||||
sseg sub (old_eax_ptr), PCREG-AXREG ! here
|
||||
|
||||
push eax ! eax is scratch register
|
||||
|
||||
@ -539,7 +535,7 @@ exception1: ! Common for all exceptions.
|
||||
sseg mov (old_eip), eax
|
||||
mov eax, esp
|
||||
add eax, 4
|
||||
sseg mov (_old_eip_ptr), eax
|
||||
sseg mov (old_eip_ptr), eax
|
||||
movzx eax, 4+4(esp) ! old cs
|
||||
sseg mov (old_cs), eax
|
||||
mov eax, 8+4(esp) ! old eflags
|
||||
@ -547,6 +543,9 @@ exception1: ! Common for all exceptions.
|
||||
|
||||
pop eax
|
||||
call save
|
||||
push (pagefaultcr2)
|
||||
push (old_eax_ptr)
|
||||
push (old_eip_ptr)
|
||||
push (old_eflags)
|
||||
push (old_cs)
|
||||
push (old_eip)
|
||||
@ -554,7 +553,7 @@ exception1: ! Common for all exceptions.
|
||||
push (ex_number)
|
||||
call _exception ! (ex_number, trap_errno, old_eip,
|
||||
! old_cs, old_eflags)
|
||||
add esp, 5*4
|
||||
add esp, 8*4
|
||||
ret
|
||||
|
||||
|
||||
@ -566,8 +565,14 @@ _write_cr3:
|
||||
push ebp
|
||||
mov ebp, esp
|
||||
mov eax, 8(ebp)
|
||||
inc (_cr3_test)
|
||||
! cmp eax, (loadedcr3)
|
||||
! jz 0f
|
||||
inc (_cr3_reload)
|
||||
mov cr3, eax
|
||||
mov (loadedcr3), eax
|
||||
! mov (_dirtypde), 0
|
||||
0:
|
||||
pop ebp
|
||||
ret
|
||||
|
||||
@ -591,8 +596,11 @@ k_stack:
|
||||
k_stktop: ! top of kernel stack
|
||||
.comm ex_number, 4
|
||||
.comm trap_errno, 4
|
||||
.comm old_eip_ptr, 4
|
||||
.comm old_eax_ptr, 4
|
||||
.comm old_eip, 4
|
||||
.comm old_cs, 4
|
||||
.comm old_eflags, 4
|
||||
.comm pagefaultcr2, 4
|
||||
.comm loadedcr3, 4
|
||||
|
||||
|
@ -52,11 +52,13 @@ _PROTOTYPE( void vir_outsw, (u16_t port, struct proc *proc, u32_t vir, size_t co
|
||||
_PROTOTYPE( void i386_updatepde, (int pde, u32_t val));
|
||||
_PROTOTYPE( void i386_freepde, (int pde));
|
||||
_PROTOTYPE( void getcr3val, (void));
|
||||
_PROTOTYPE( void switchedcr3, (void));
|
||||
|
||||
|
||||
/* exception.c */
|
||||
_PROTOTYPE( void exception, (unsigned vec_nr, u32_t trap_errno,
|
||||
u32_t old_eip, U16_t old_cs, u32_t old_eflags) );
|
||||
u32_t old_eip, U16_t old_cs, u32_t old_eflags,
|
||||
u32_t *old_eip_ptr, u32_t *old_eax_ptr, u32_t pagefaultcr2) );
|
||||
|
||||
/* klib386.s */
|
||||
_PROTOTYPE( void level0, (void (*func)(void)) );
|
||||
|
@ -14,11 +14,11 @@
|
||||
|
||||
#include "proto.h"
|
||||
#include "../../proc.h"
|
||||
#include "../../debug.h"
|
||||
|
||||
#define CR0_EM 0x0004 /* set to enable trap on any FP instruction */
|
||||
|
||||
FORWARD _PROTOTYPE( void ser_debug, (int c));
|
||||
FORWARD _PROTOTYPE( void ser_dump_stats, (void));
|
||||
|
||||
PUBLIC void arch_shutdown(int how)
|
||||
{
|
||||
@ -139,39 +139,65 @@ PUBLIC void do_ser_debug()
|
||||
|
||||
PRIVATE void ser_debug(int c)
|
||||
{
|
||||
int u = 0;
|
||||
|
||||
do_serial_debug++;
|
||||
kprintf("ser_debug: %d\n", c);
|
||||
/* Disable interrupts so that we get a consistent state. */
|
||||
if(!intr_disabled()) { lock; u = 1; };
|
||||
|
||||
switch(c)
|
||||
{
|
||||
case '1':
|
||||
ser_dump_proc();
|
||||
break;
|
||||
case '2':
|
||||
ser_dump_stats();
|
||||
ser_dump_queues();
|
||||
break;
|
||||
#define TOGGLECASE(ch, flag) \
|
||||
case ch: { \
|
||||
if(verboseflags & flag) { \
|
||||
verboseflags &= ~flag; \
|
||||
printf("%s disabled\n", #flag); \
|
||||
} else { \
|
||||
verboseflags |= flag; \
|
||||
printf("%s enabled\n", #flag); \
|
||||
} \
|
||||
break; \
|
||||
}
|
||||
TOGGLECASE('8', VF_SCHEDULING)
|
||||
TOGGLECASE('9', VF_PICKPROC)
|
||||
}
|
||||
do_serial_debug--;
|
||||
if(u) { unlock; }
|
||||
}
|
||||
|
||||
PRIVATE void printslot(struct proc *pp)
|
||||
PRIVATE void printslot(struct proc *pp, int level)
|
||||
{
|
||||
static int level = 0;
|
||||
struct proc *depproc = NULL;
|
||||
int dep = NONE;
|
||||
#define COL { int i; for(i = 0; i < level; i++) printf("> "); }
|
||||
|
||||
if(level >= NR_PROCS) {
|
||||
kprintf("loop??\n");
|
||||
return;
|
||||
}
|
||||
|
||||
level++;
|
||||
if(pp->p_ready && pp->p_rts_flags) {
|
||||
printf("HUH? p_ready but rts flags!\n");
|
||||
}
|
||||
|
||||
kprintf("%*s %d: %s %d prio %d/%d time %d/%d cr3 0x%lx rts %s misc %s ",
|
||||
level, "",
|
||||
if(!pp->p_ready && !pp->p_rts_flags) {
|
||||
printf("HUH? not p_ready but no rts flags!\n");
|
||||
}
|
||||
|
||||
COL
|
||||
|
||||
kprintf("%d: %s %d prio %d/%d time %d/%d cr3 0x%lx rts %s misc %s ready %d",
|
||||
proc_nr(pp), pp->p_name, pp->p_endpoint,
|
||||
pp->p_priority, pp->p_max_priority, pp->p_user_time,
|
||||
pp->p_sys_time, pp->p_seg.p_cr3,
|
||||
rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags));
|
||||
rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags),
|
||||
pp->p_ready);
|
||||
|
||||
if(pp->p_rts_flags & SENDING) {
|
||||
dep = pp->p_sendto_e;
|
||||
@ -201,65 +227,46 @@ PRIVATE void printslot(struct proc *pp)
|
||||
} else {
|
||||
kprintf("\n");
|
||||
}
|
||||
kprintf("%*s ", level, "");
|
||||
|
||||
COL
|
||||
proc_stacktrace(pp);
|
||||
|
||||
if(pp->p_rts_flags & VMREQUEST) {
|
||||
COL
|
||||
printf("vmrequest set with: %s\n", pp->p_vmrequest.stacktrace);
|
||||
}
|
||||
|
||||
if(depproc)
|
||||
printslot(depproc);
|
||||
level--;
|
||||
printslot(depproc, level+1);
|
||||
}
|
||||
|
||||
PUBLIC void ser_dump_queues()
|
||||
{
|
||||
int q;
|
||||
for(q = 0; q < NR_SCHED_QUEUES; q++) {
|
||||
struct proc *p;
|
||||
if(rdy_head[q])
|
||||
printf("%2d: ", q);
|
||||
for(p = rdy_head[q]; p; p = p->p_nextready) {
|
||||
printf("%s / %d ", p->p_name, p->p_endpoint);
|
||||
}
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
PUBLIC void ser_dump_proc()
|
||||
{
|
||||
struct proc *pp;
|
||||
int u = 0;
|
||||
|
||||
/* Disable interrupts so that we get a consistent state. */
|
||||
if(!intr_disabled()) { lock; u = 1; };
|
||||
CHECK_RUNQUEUES;
|
||||
|
||||
for (pp= BEG_PROC_ADDR; pp < END_PROC_ADDR; pp++)
|
||||
{
|
||||
if (pp->p_rts_flags & SLOT_FREE)
|
||||
continue;
|
||||
printslot(pp);
|
||||
printslot(pp, 0);
|
||||
}
|
||||
|
||||
if(u) { unlock; }
|
||||
}
|
||||
|
||||
PRIVATE void ser_dump_stats()
|
||||
{
|
||||
kprintf("ipc_stats:\n");
|
||||
kprintf("deadproc: %d\n", ipc_stats.deadproc);
|
||||
kprintf("bad_endpoint: %d\n", ipc_stats.bad_endpoint);
|
||||
kprintf("dst_not_allowed: %d\n", ipc_stats.dst_not_allowed);
|
||||
kprintf("bad_call: %d\n", ipc_stats.bad_call);
|
||||
kprintf("call_not_allowed: %d\n", ipc_stats.call_not_allowed);
|
||||
kprintf("bad_buffer: %d\n", ipc_stats.bad_buffer);
|
||||
kprintf("deadlock: %d\n", ipc_stats.deadlock);
|
||||
kprintf("not_ready: %d\n", ipc_stats.not_ready);
|
||||
kprintf("src_died: %d\n", ipc_stats.src_died);
|
||||
kprintf("dst_died: %d\n", ipc_stats.dst_died);
|
||||
kprintf("no_priv: %d\n", ipc_stats.no_priv);
|
||||
kprintf("bad_size: %d\n", ipc_stats.bad_size);
|
||||
kprintf("bad_senda: %d\n", ipc_stats.bad_senda);
|
||||
if (ex64hi(ipc_stats.total))
|
||||
{
|
||||
kprintf("total: %x:%08x\n", ex64hi(ipc_stats.total),
|
||||
ex64lo(ipc_stats.total));
|
||||
}
|
||||
else
|
||||
kprintf("total: %u\n", ex64lo(ipc_stats.total));
|
||||
|
||||
kprintf("sys_stats:\n");
|
||||
kprintf("bad_req: %d\n", sys_stats.bad_req);
|
||||
kprintf("not_allowed: %d\n", sys_stats.not_allowed);
|
||||
if (ex64hi(sys_stats.total))
|
||||
{
|
||||
kprintf("total: %x:%08x\n", ex64hi(sys_stats.total),
|
||||
ex64lo(sys_stats.total));
|
||||
}
|
||||
else
|
||||
kprintf("total: %u\n", ex64lo(sys_stats.total));
|
||||
}
|
||||
|
||||
#if SPROFILE
|
||||
|
@ -1,15 +0,0 @@
|
||||
|
||||
.define _last_cr3
|
||||
|
||||
#define LOADCR3WITHEAX(type, newcr3, ptproc) ;\
|
||||
sseg inc (_cr3switch) ;\
|
||||
sseg mov eax, newcr3 ;\
|
||||
sseg cmp (_last_cr3), eax ;\
|
||||
jz 8f ;\
|
||||
mov cr3, eax ;\
|
||||
sseg inc (_cr3reload) ;\
|
||||
sseg mov (_last_cr3), eax ;\
|
||||
sseg mov eax, (ptproc) ;\
|
||||
sseg mov (_ptproc), eax ;\
|
||||
8:
|
||||
|
@ -96,7 +96,9 @@ check_runqueues_f(char *file, int line)
|
||||
for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
|
||||
if(xp->p_magic != PMAGIC)
|
||||
MYPANIC("p_magic wrong in proc table");
|
||||
if (! isemptyp(xp) && xp->p_ready && ! xp->p_found) {
|
||||
if (isemptyp(xp))
|
||||
continue;
|
||||
if(xp->p_ready && ! xp->p_found) {
|
||||
kprintf("sched error: ready proc %d not on queue\n", xp->p_nr);
|
||||
MYPANIC("ready proc not on scheduling queue");
|
||||
if (l++ > MAX_LOOP) { MYPANIC("loop in debug.c?"); }
|
||||
@ -126,6 +128,7 @@ rtsflagstr(int flags)
|
||||
FLAG(VMINHIBIT);
|
||||
FLAG(PAGEFAULT);
|
||||
FLAG(VMREQUEST);
|
||||
FLAG(VMREQTARGET);
|
||||
|
||||
return str;
|
||||
}
|
||||
|
@ -25,8 +25,21 @@
|
||||
#define DEBUG_TIME_LOCKS 1
|
||||
|
||||
/* Runtime sanity checking. */
|
||||
#define DEBUG_VMASSERT 0
|
||||
#define DEBUG_SCHED_CHECK 0
|
||||
#define DEBUG_VMASSERT 1
|
||||
#define DEBUG_SCHED_CHECK 1
|
||||
#define DEBUG_STACK_CHECK 1
|
||||
#define DEBUG_TRACE 1
|
||||
|
||||
#if DEBUG_TRACE
|
||||
|
||||
#define VF_SCHEDULING (1L << 1)
|
||||
#define VF_PICKPROC (1L << 2)
|
||||
|
||||
#define TRACE(code, statement) if(verboseflags & code) { printf("%s:%d: ", __FILE__, __LINE__); statement }
|
||||
|
||||
#else
|
||||
#define TRACE(code, statement)
|
||||
#endif
|
||||
|
||||
#define NOREC_ENTER(varname) \
|
||||
static int varname = 0; \
|
||||
@ -50,7 +63,6 @@
|
||||
|
||||
#if DEBUG_VMASSERT
|
||||
#define vmassert(t) { \
|
||||
FIXME("vmassert on"); \
|
||||
if(!(t)) { minix_panic("vm: assert " #t " failed\n", __LINE__); } }
|
||||
#else
|
||||
#define vmassert(t) { }
|
||||
|
kernel/glo.h
@ -16,6 +16,7 @@
|
||||
#include <minix/config.h>
|
||||
#include <archtypes.h>
|
||||
#include "config.h"
|
||||
#include "debug.h"
|
||||
|
||||
/* Variables relating to shutting down MINIX. */
|
||||
EXTERN char kernel_exception; /* TRUE after system exceptions */
|
||||
@ -29,9 +30,9 @@ EXTERN struct k_randomness krandom; /* gather kernel random information */
|
||||
EXTERN struct loadinfo kloadinfo; /* status of load average */
|
||||
|
||||
/* Process scheduling information and the kernel reentry count. */
|
||||
EXTERN struct proc *prev_ptr; /* previously running process */
|
||||
EXTERN struct proc *proc_ptr; /* pointer to currently running process */
|
||||
EXTERN struct proc *next_ptr; /* next process to run after restart() */
|
||||
EXTERN struct proc *prev_ptr;
|
||||
EXTERN struct proc *bill_ptr; /* process to bill for clock ticks */
|
||||
EXTERN struct proc *vmrestart; /* first process on vmrestart queue */
|
||||
EXTERN struct proc *vmrequest; /* first process on vmrequest queue */
|
||||
@ -46,32 +47,6 @@ EXTERN int irq_actids[NR_IRQ_VECTORS]; /* IRQ ID bits active */
|
||||
EXTERN int irq_use; /* map of all in-use irq's */
|
||||
EXTERN u32_t system_hz; /* HZ value */
|
||||
|
||||
EXTERN struct ipc_stats
|
||||
{
|
||||
unsigned long deadproc;
|
||||
unsigned long bad_endpoint;
|
||||
unsigned long dst_not_allowed;
|
||||
unsigned long bad_call;
|
||||
unsigned long call_not_allowed;
|
||||
unsigned long bad_buffer;
|
||||
unsigned long deadlock;
|
||||
unsigned long not_ready;
|
||||
unsigned long src_died;
|
||||
unsigned long dst_died;
|
||||
unsigned long no_priv;
|
||||
unsigned long bad_size;
|
||||
unsigned long bad_senda;
|
||||
u64_t total;
|
||||
} ipc_stats;
|
||||
extern endpoint_t ipc_stats_target;
|
||||
|
||||
EXTERN struct system_stats
|
||||
{
|
||||
unsigned long bad_req;
|
||||
unsigned long not_allowed;
|
||||
u64_t total;
|
||||
} sys_stats;
|
||||
|
||||
/* Miscellaneous. */
|
||||
EXTERN reg_t mon_ss, mon_sp; /* boot monitor stack */
|
||||
EXTERN int mon_return; /* true if we can return to monitor */
|
||||
@ -84,17 +59,14 @@ EXTERN char params_buffer[512]; /* boot monitor parameters */
|
||||
EXTERN int minix_panicing;
|
||||
EXTERN int locklevel;
|
||||
|
||||
EXTERN unsigned long cr3switch;
|
||||
EXTERN unsigned long cr3reload;
|
||||
#if DEBUG_TRACE
|
||||
EXTERN int verboseflags;
|
||||
#endif
|
||||
|
||||
/* VM */
|
||||
EXTERN int vm_running;
|
||||
EXTERN int must_notify_vm;
|
||||
EXTERN struct proc *ptproc;
|
||||
|
||||
/* Verbose flags (debugging). */
|
||||
EXTERN int verbose_vm;
|
||||
|
||||
/* Timing */
|
||||
EXTERN util_timingdata_t timingdata[TIMING_CATEGORIES];
|
||||
|
||||
|
@ -159,6 +159,9 @@ PUBLIC void main()
|
||||
rp->p_reg.sp -= sizeof(reg_t);
|
||||
}
|
||||
|
||||
/* scheduling functions depend on proc_ptr pointing somewhere. */
|
||||
if(!proc_ptr) proc_ptr = rp;
|
||||
|
||||
/* If this process has its own page table, VM will set the
|
||||
* PT up and manage it. VM will signal the kernel when it has
|
||||
* done this; until then, don't let it run.
|
||||
@ -184,7 +187,7 @@ PUBLIC void main()
|
||||
/* MINIX is now ready. All boot image processes are on the ready queue.
|
||||
* Return to the assembly code to start running the current process.
|
||||
*/
|
||||
bill_ptr = proc_addr(IDLE); /* it has to point somewhere */
|
||||
bill_ptr = proc_addr(IDLE); /* it has to point somewhere */
|
||||
announce(); /* print MINIX startup banner */
|
||||
/* Warnings for sanity checks that take time. These warnings are printed
|
||||
* so it's a clear warning no full release should be done with them
|
||||
|
kernel/proc.c
@ -67,6 +67,9 @@ FORWARD _PROTOTYPE( int try_one, (struct proc *src_ptr, struct proc *dst_ptr));
|
||||
FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
|
||||
FORWARD _PROTOTYPE( void pick_proc, (void));
|
||||
|
||||
#define PICK_ANY 1
|
||||
#define PICK_HIGHERONLY 2
|
||||
|
||||
#define BuildNotifyMessage(m_ptr, src, dst_ptr) \
|
||||
(m_ptr)->m_type = NOTIFY_FROM(src); \
|
||||
(m_ptr)->NOTIFY_TIMESTAMP = get_uptime(); \
|
||||
@ -81,40 +84,76 @@ FORWARD _PROTOTYPE( void pick_proc, (void));
|
||||
break; \
|
||||
}
|
||||
|
||||
#define Deliver(rp) do { \
|
||||
vmassert(rp->p_misc_flags & MF_DELIVERMSG); \
|
||||
vmassert(rp->p_delivermsg_lin); \
|
||||
if(arch_switch_copymsg(rp, &rp->p_delivermsg, \
|
||||
rp->p_delivermsg_lin)) { \
|
||||
minix_panic("MF_DELIVERMSG copy failed", NO_NUM); \
|
||||
} \
|
||||
rp->p_delivermsg.m_source = NONE; \
|
||||
rp->p_delivermsg_lin = 0; \
|
||||
rp->p_misc_flags &= ~MF_DELIVERMSG; \
|
||||
} while(0)
|
||||
|
||||
/*===========================================================================*
|
||||
* QueueMess *
|
||||
*===========================================================================*/
|
||||
PRIVATE int QueueMess(endpoint_t ep, vir_bytes msg_lin, struct proc *dst)
|
||||
{
|
||||
int k;
|
||||
NOREC_ENTER(queuemess);
|
||||
/* Queue a message from the src process (in memory) to the dst
|
||||
* process (using dst process table entry). Do actual copy here;
|
||||
* it's an error if the copy fails.
|
||||
* process (using dst process table entry). Do actual copy to
|
||||
* kernel here; it's an error if the copy fails into kernel.
|
||||
*/
|
||||
vmassert(!(dst->p_misc_flags & MF_DELIVERMSG));
|
||||
vmassert(dst->p_delivermsg_lin);
|
||||
vmassert(isokendpt(ep, &k));
|
||||
|
||||
if(phys_copy(msg_lin, vir2phys(&dst->p_delivermsg),
|
||||
sizeof(message))) {
|
||||
return EFAULT;
|
||||
}
|
||||
dst->p_delivermsg.m_source = ep;
|
||||
dst->p_misc_flags |= MF_DELIVERMSG;
|
||||
if(iskernelp(dst) || ptproc == dst) {
|
||||
Deliver(dst);
|
||||
NOREC_RETURN(queuemess, EFAULT);
|
||||
}
|
||||
|
||||
return OK;
|
||||
dst->p_delivermsg.m_source = ep;
|
||||
dst->p_misc_flags |= MF_DELIVERMSG;
|
||||
|
||||
#if 0
|
||||
if(iskernelp(dst) || ptproc == dst) {
|
||||
printf("instant delivery to %d\n", dst->p_endpoint);
|
||||
delivermsg(dst);
|
||||
} else {
|
||||
printf("queued delivery to %d\n", dst->p_endpoint);
|
||||
}
|
||||
#endif
|
||||
|
||||
NOREC_RETURN(queuemess, OK);
|
||||
}
|
||||
|
||||
/*===========================================================================*
|
||||
* schedcheck *
|
||||
*===========================================================================*/
|
||||
PUBLIC void schedcheck(void)
|
||||
{
|
||||
/* This function is called an instant before proc_ptr is
|
||||
* to be scheduled again.
|
||||
*/
|
||||
NOREC_ENTER(schedch);
|
||||
vmassert(intr_disabled());
|
||||
if(next_ptr) {
|
||||
proc_ptr = next_ptr;
|
||||
next_ptr = NULL;
|
||||
}
|
||||
vmassert(proc_ptr);
|
||||
vmassert(!proc_ptr->p_rts_flags);
|
||||
while(proc_ptr->p_misc_flags & MF_DELIVERMSG) {
|
||||
vmassert(!next_ptr);
|
||||
vmassert(!proc_ptr->p_rts_flags);
|
||||
TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
|
||||
proc_ptr->p_name, proc_ptr->p_endpoint););
|
||||
if(delivermsg(proc_ptr) == VMSUSPEND) {
|
||||
vmassert(next_ptr);
|
||||
TRACE(VF_SCHEDULING, printf("suspending %s / %d\n",
|
||||
proc_ptr->p_name, proc_ptr->p_endpoint););
|
||||
vmassert(proc_ptr->p_rts_flags);
|
||||
vmassert(next_ptr != proc_ptr);
|
||||
proc_ptr = next_ptr;
|
||||
vmassert(!proc_ptr->p_rts_flags);
|
||||
next_ptr = NULL;
|
||||
}
|
||||
}
|
||||
TRACE(VF_SCHEDULING, printf("starting %s / %d\n",
|
||||
proc_ptr->p_name, proc_ptr->p_endpoint););
|
||||
NOREC_RETURN(schedch, );
|
||||
}
|
||||
|
||||
/*===========================================================================*
|
||||
@ -146,9 +185,6 @@ long bit_map; /* notification event set or flags */
|
||||
}
|
||||
#endif
|
||||
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.total= add64u(ipc_stats.total, 1);
|
||||
|
||||
#if 0
|
||||
if(src_dst_e != 4 && src_dst_e != 5 &&
|
||||
caller_ptr->p_endpoint != 4 && caller_ptr->p_endpoint != 5) {
|
||||
@ -167,8 +203,6 @@ long bit_map; /* notification event set or flags */
|
||||
if (RTS_ISSET(caller_ptr, SLOT_FREE))
|
||||
{
|
||||
kprintf("called by the dead?!?\n");
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.deadproc++;
|
||||
return EINVAL;
|
||||
}
|
||||
#endif
|
||||
@ -193,8 +227,6 @@ long bit_map; /* notification event set or flags */
|
||||
kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
|
||||
call_nr, proc_nr(caller_ptr), src_dst_e);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.bad_endpoint++;
|
||||
return EINVAL;
|
||||
}
|
||||
src_dst_p = src_dst_e;
|
||||
@ -214,8 +246,6 @@ long bit_map; /* notification event set or flags */
|
||||
kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
|
||||
call_nr, proc_nr(caller_ptr), src_dst_e);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.bad_endpoint++;
|
||||
return EDEADSRCDST;
|
||||
}
|
||||
|
||||
@ -233,8 +263,6 @@ long bit_map; /* notification event set or flags */
|
||||
call_nr, proc_nr(caller_ptr),
|
||||
caller_ptr->p_name, src_dst_p);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.dst_not_allowed++;
|
||||
return(ECALLDENIED); /* call denied by ipc mask */
|
||||
}
|
||||
}
|
||||
@ -247,8 +275,6 @@ long bit_map; /* notification event set or flags */
|
||||
"sys_call: ipc mask denied trap %d from %d to %d\n",
|
||||
call_nr, caller_ptr->p_endpoint, src_dst_e);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.dst_not_allowed++;
|
||||
return(ECALLDENIED); /* call denied by ipc mask */
|
||||
}
|
||||
}
|
||||
@ -261,8 +287,6 @@ long bit_map; /* notification event set or flags */
|
||||
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
|
||||
call_nr, proc_nr(caller_ptr), src_dst_p);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.bad_call++;
|
||||
return(ETRAPDENIED); /* trap denied by mask or kernel */
|
||||
}
|
||||
|
||||
@ -275,8 +299,6 @@ long bit_map; /* notification event set or flags */
|
||||
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
|
||||
call_nr, proc_nr(caller_ptr), src_dst_p);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.call_not_allowed++;
|
||||
return(ETRAPDENIED); /* trap denied by mask or kernel */
|
||||
}
|
||||
|
||||
@ -285,8 +307,6 @@ long bit_map; /* notification event set or flags */
|
||||
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
|
||||
call_nr, proc_nr(caller_ptr), src_dst_e);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.call_not_allowed++;
|
||||
return(ETRAPDENIED); /* trap denied by mask or kernel */
|
||||
}
|
||||
|
||||
@ -314,8 +334,6 @@ long bit_map; /* notification event set or flags */
|
||||
kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
|
||||
call_nr, proc_nr(caller_ptr), src_dst_p, group_size);
|
||||
#endif
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.deadlock++;
|
||||
return(ELOCKED);
|
||||
}
|
||||
}
|
||||
@ -457,8 +475,6 @@ int flags;
|
||||
|
||||
if (RTS_ISSET(dst_ptr, NO_ENDPOINT))
|
||||
{
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.dst_died++;
|
||||
return EDSTDIED;
|
||||
}
|
||||
|
||||
@ -473,8 +489,6 @@ int flags;
|
||||
RTS_UNSET(dst_ptr, RECEIVING);
|
||||
} else {
|
||||
if(flags & NON_BLOCKING) {
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.not_ready++;
|
||||
return(ENOTREADY);
|
||||
}
|
||||
|
||||
@ -525,6 +539,7 @@ int flags;
|
||||
|
||||
/* This is where we want our message. */
|
||||
caller_ptr->p_delivermsg_lin = linaddr;
|
||||
caller_ptr->p_delivermsg_vir = m_ptr;
|
||||
|
||||
if(src_e == ANY) src_p = ANY;
|
||||
else
|
||||
@ -532,8 +547,6 @@ int flags;
|
||||
okendpt(src_e, &src_p);
|
||||
if (RTS_ISSET(proc_addr(src_p), NO_ENDPOINT))
|
||||
{
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.src_died++;
|
||||
return ESRCDIED;
|
||||
}
|
||||
}
|
||||
@ -550,6 +563,7 @@ int flags;
|
||||
|
||||
map = &priv(caller_ptr)->s_notify_pending;
|
||||
for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {
|
||||
endpoint_t hisep;
|
||||
|
||||
/* Find a pending notification from the requested source. */
|
||||
if (! *chunk) continue; /* no bits in chunk */
|
||||
@ -567,8 +581,10 @@ int flags;
|
||||
|
||||
/* Found a suitable source, deliver the notification message. */
|
||||
BuildNotifyMessage(&m, src_proc_nr, caller_ptr); /* assemble message */
|
||||
hisep = proc_addr(src_proc_nr)->p_endpoint;
|
||||
vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
|
||||
if((r=QueueMess(src_proc_nr, vir2phys(&m), caller_ptr)) != OK) {
|
||||
vmassert(src_e == ANY || hisep == src_e);
|
||||
if((r=QueueMess(hisep, vir2phys(&m), caller_ptr)) != OK) {
|
||||
minix_panic("mini_receive: local QueueMess failed", NO_NUM);
|
||||
}
|
||||
return(OK); /* report success */
|
||||
@ -585,8 +601,6 @@ int flags;
|
||||
kprintf("%d: receive from %d; found dead %d (%s)?\n",
|
||||
caller_ptr->p_endpoint, src_e, (*xpp)->p_endpoint,
|
||||
(*xpp)->p_name);
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.deadproc++;
|
||||
return EINVAL;
|
||||
}
|
||||
#endif
|
||||
@ -629,8 +643,6 @@ int flags;
|
||||
RTS_SET(caller_ptr, RECEIVING);
|
||||
return(OK);
|
||||
} else {
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.not_ready++;
|
||||
return(ENOTREADY);
|
||||
}
|
||||
}
|
||||
@ -724,8 +736,6 @@ size_t size;
|
||||
{
|
||||
kprintf(
|
||||
"mini_senda: warning caller has no privilege structure\n");
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.no_priv++;
|
||||
return EPERM;
|
||||
}
|
||||
|
||||
@ -747,8 +757,6 @@ size_t size;
|
||||
*/
|
||||
if (size > 16*(NR_TASKS + NR_PROCS))
|
||||
{
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.bad_size++;
|
||||
return EDOM;
|
||||
}
|
||||
|
||||
@ -770,8 +778,6 @@ size_t size;
|
||||
if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY) ||
|
||||
!(flags & AMF_VALID))
|
||||
{
|
||||
if (caller_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.bad_senda++;
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
@ -935,8 +941,6 @@ struct proc *dst_ptr;
|
||||
{
|
||||
kprintf("try_one: bad bits in table\n");
|
||||
privp->s_asynsize= 0;
|
||||
if (src_ptr->p_endpoint == ipc_stats_target)
|
||||
ipc_stats.bad_senda++;
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
@ -1030,15 +1034,19 @@ register struct proc *rp; /* this process is now runnable */
int q; /* scheduling queue to use */
int front; /* add to front or back */

NOREC_ENTER(enqueuefunc);

#if DEBUG_SCHED_CHECK
if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
CHECK_RUNQUEUES;
if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
#endif

/* Determine where to insert to process. */
sched(rp, &q, &front);

vmassert(q >= 0);
vmassert(q < IDLE_Q || rp->p_endpoint == IDLE);

/* Now add the process to the queue. */
if (rdy_head[q] == NIL_PROC) { /* add to empty queue */
rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
@ -1054,19 +1062,30 @@ register struct proc *rp; /* this process is now runnable */
rp->p_nextready = NIL_PROC; /* mark new end */
}

/* Now select the next process to run, if there isn't a current
* process yet or current process isn't ready any more, or
* it's PREEMPTIBLE.
*/
if(!proc_ptr || proc_ptr->p_rts_flags ||
(priv(proc_ptr)->s_flags & PREEMPTIBLE)) {
pick_proc();
}

#if DEBUG_SCHED_CHECK
rp->p_ready = 1;
CHECK_RUNQUEUES;
#endif

/* Now select the next process to run, if there isn't a current
* process yet or current process isn't ready any more, or
* it's PREEMPTIBLE.
*/
FIXME("PREEMPTIBLE test?");
vmassert(proc_ptr);
#if 0
if(!proc_ptr || proc_ptr->p_rts_flags)
#else
if((proc_ptr->p_priority > rp->p_priority) &&
(priv(proc_ptr)->s_flags & PREEMPTIBLE))
#endif
pick_proc();

#if DEBUG_SCHED_CHECK
CHECK_RUNQUEUES;
#endif

NOREC_RETURN(enqueuefunc, );
}

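The reworked tail of enqueue() only calls pick_proc() when the newly queued process sits in a better (numerically lower) priority queue than the one currently running and that process is marked PREEMPTIBLE. A self-contained sketch of that decision in isolation; the field and flag names mirror the diff, the cut-down types and the helper itself are stand-ins.

/* Sketch of the preemption test used at the end of enqueue(): preempt only
 * if the current process is preemptible and the newcomer has a better
 * (numerically lower) p_priority. PREEMPTIBLE's value is illustrative. */
#define PREEMPTIBLE 0x02	/* illustrative bit value */

struct pe_priv { int s_flags; };
struct pe_proc { int p_priority; struct pe_priv *p_priv; };
#define priv(p) ((p)->p_priv)

static int should_preempt(struct pe_proc *current, struct pe_proc *newcomer)
{
	if (!(priv(current)->s_flags & PREEMPTIBLE))
		return 0;	/* current process may not be preempted */
	return current->p_priority > newcomer->p_priority;
}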
/*===========================================================================*
@ -1083,14 +1102,17 @@ register struct proc *rp; /* this process is no longer runnable */
register struct proc **xpp; /* iterate over queue */
register struct proc *prev_xp;

NOREC_ENTER(dequeuefunc);

#if DEBUG_STACK_CHECK
/* Side-effect for kernel: check if the task's stack still is ok? */
if (iskernelp(rp)) {
if (*priv(rp)->s_stack_guard != STACK_GUARD)
minix_panic("stack overrun by task", proc_nr(rp));
}
#endif

#if DEBUG_SCHED_CHECK
CHECK_RUNQUEUES;
if(!intr_disabled()) { minix_panic("dequeue with interrupts enabled", NO_NUM); }
if (! rp->p_ready) minix_panic("dequeue() already unready process", NO_NUM);
#endif
@ -1106,17 +1128,23 @@ register struct proc *rp; /* this process is no longer runnable */
*xpp = (*xpp)->p_nextready; /* replace with next chain */
if (rp == rdy_tail[q]) /* queue tail removed */
rdy_tail[q] = prev_xp; /* set new tail */

#if DEBUG_SCHED_CHECK
rp->p_ready = 0;
CHECK_RUNQUEUES;
#endif
if (rp == proc_ptr || rp == next_ptr) /* active process removed */
pick_proc(); /* pick new process to run */
pick_proc(); /* pick new process to run */
break;
}
prev_xp = *xpp; /* save previous in chain */
}

#if DEBUG_SCHED_CHECK
rp->p_ready = 0;
CHECK_RUNQUEUES;
#endif

NOREC_RETURN(dequeuefunc, );
}

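dequeue() unlinks the process with a pointer-to-pointer walk: xpp tracks the slot that points at the current node, so removal is a single splice, with prev_xp kept around only to repair the tail pointer. A self-contained sketch of the same walk on a stand-in node type.

/* Sketch of the pointer-to-pointer walk dequeue() uses to unlink a node from
 * a singly linked ready queue. The node type is a stand-in; only the
 * "*xpp = (*xpp)->next" splice comes from the code above. */
struct node { struct node *next; };

static void unlink_node(struct node **head, struct node **tail, struct node *victim)
{
	struct node **xpp, *prev = (struct node *)0;

	for (xpp = head; *xpp; xpp = &(*xpp)->next) {
		if (*xpp == victim) {
			*xpp = victim->next;	/* splice victim out */
			if (victim == *tail)	/* queue tail removed */
				*tail = prev;	/* may become NULL if list is now empty */
			break;
		}
		prev = *xpp;			/* save previous in chain */
	}
}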
/*===========================================================================*
@ -1162,32 +1190,28 @@ PRIVATE void pick_proc()
* clock task can tell who to bill for system time.
*/
register struct proc *rp; /* process to run */
int q; /* iterate over queues */
int q; /* iterate over queues */

NOREC_ENTER(pick);

/* Check each of the scheduling queues for ready processes. The number of
* queues is defined in proc.h, and priorities are set in the task table.
* The lowest queue contains IDLE, which is always ready.
*/
for (q=0; q < NR_SCHED_QUEUES; q++) {
if ( (rp = rdy_head[q]) != NIL_PROC) {
if(rp->p_misc_flags & MF_DELIVERMSG) {
/* Want to schedule process, but have to copy a message
* first.
*/
FIXME("MF_DELIVERMSG no callback");
Deliver(rp);
}
next_ptr = rp; /* run process 'rp' next */
#if 0
if(!iskernelp(rp))
kprintf("[run %s/%d]", rp->p_name, rp->p_endpoint);
#endif
if (priv(rp)->s_flags & BILLABLE)
bill_ptr = rp; /* bill for system time */
return;
}
int found = 0;
if(!(rp = rdy_head[q])) {
TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
continue;
}
TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
rp->p_name, rp->p_endpoint, q););
next_ptr = rp; /* run process 'rp' next */
vmassert(!next_ptr->p_rts_flags);
if (priv(rp)->s_flags & BILLABLE)
bill_ptr = rp; /* bill for system time */
NOREC_RETURN(pick, );
}
minix_panic("no ready process", NO_NUM);
}

/*===========================================================================*

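pick_proc() scans the run queues from highest to lowest priority, takes the head of the first non-empty queue, and records it as the billable process when the BILLABLE flag is set. A self-contained sketch of that selection loop; the array size, flag value and struct are stand-ins.

/* Sketch of the pick_proc() selection loop: scan queues from the highest
 * priority (index 0) downward and take the first ready head. The constants
 * and structs here are illustrative stand-ins. */
#define NR_SCHED_QUEUES 16
#define BILLABLE 0x01

struct sched_proc { struct sched_proc *next; int s_flags; };

static struct sched_proc *pick_next(struct sched_proc *rdy_head[NR_SCHED_QUEUES],
	struct sched_proc **bill_ptr)
{
	int q;
	struct sched_proc *rp;

	for (q = 0; q < NR_SCHED_QUEUES; q++) {
		if (!(rp = rdy_head[q]))
			continue;		/* queue empty, try the next one */
		if (rp->s_flags & BILLABLE)
			*bill_ptr = rp;		/* bill for system time */
		return rp;			/* run this process next */
	}
	return (struct sched_proc *)0;		/* the kernel would panic here */
}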
@ -47,6 +47,7 @@ struct proc {

message p_sendmsg; /* Message from this process if SENDING */
message p_delivermsg; /* Message for this process if MF_DELIVERMSG */
vir_bytes p_delivermsg_vir; /* Virtual addr this proc wants message at */
vir_bytes p_delivermsg_lin; /* Linear addr this proc wants message at */

/* If handler functions detect a process wants to do something with
@ -60,7 +61,8 @@ struct proc {
struct proc *nextrestart; /* next in vmrestart chain */
struct proc *nextrequestor; /* next in vmrequest chain */
#define VMSTYPE_SYS_NONE 0
#define VMSTYPE_SYS_MESSAGE 1
#define VMSTYPE_KERNELCALL 1
#define VMSTYPE_DELIVERMSG 2
int type; /* suspended operation */
union {
/* VMSTYPE_SYS_MESSAGE */
@ -75,10 +77,9 @@ struct proc {
/* VM result when available */
int vmresult;

/* Target gets this set. (But caller and target can be
* the same, so we can't put this in the 'saved' union.)
*/
struct proc *requestor;
#if DEBUG_VMASSERT
char stacktrace[200];
#endif

/* If the suspended operation is a sys_call, its details are
* stored here.
@ -96,18 +97,19 @@ struct proc {
};

/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
#define SLOT_FREE 0x01 /* process slot is free */
#define NO_PRIORITY 0x02 /* process has been stopped */
#define SENDING 0x04 /* process blocked trying to send */
#define RECEIVING 0x08 /* process blocked trying to receive */
#define SIGNALED 0x10 /* set when new kernel signal arrives */
#define SIG_PENDING 0x20 /* unready while signal being processed */
#define P_STOP 0x40 /* set when process is being traced */
#define NO_PRIV 0x80 /* keep forked system process from running */
#define NO_ENDPOINT 0x100 /* process cannot send or receive messages */
#define VMINHIBIT 0x200 /* not scheduled until pagetable set by VM */
#define PAGEFAULT 0x400 /* process has unhandled pagefault */
#define VMREQUEST 0x800 /* originator of vm memory request */
#define SLOT_FREE 0x01 /* process slot is free */
#define NO_PRIORITY 0x02 /* process has been stopped */
#define SENDING 0x04 /* process blocked trying to send */
#define RECEIVING 0x08 /* process blocked trying to receive */
#define SIGNALED 0x10 /* set when new kernel signal arrives */
#define SIG_PENDING 0x20 /* unready while signal being processed */
#define P_STOP 0x40 /* set when process is being traced */
#define NO_PRIV 0x80 /* keep forked system process from running */
#define NO_ENDPOINT 0x100 /* process cannot send or receive messages */
#define VMINHIBIT 0x200 /* not scheduled until pagetable set by VM */
#define PAGEFAULT 0x400 /* process has unhandled pagefault */
#define VMREQUEST 0x800 /* originator of vm memory request */
#define VMREQTARGET 0x1000 /* target of vm memory request */

/* These runtime flags can be tested and manipulated by these macros. */

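The comment above states the key invariant: a process is runnable exactly when p_rts_flags is zero, and each RTS bit names one independent reason it cannot run (the new VMREQTARGET bit marks the target of a pending VM memory request). A tiny self-contained sketch of testing that invariant; the bit values match the defines above, the struct and helper are stand-ins.

/* Sketch of the p_rts_flags invariant: zero means runnable, each set bit is
 * one independent reason the process must not run. */
#define RECEIVING 0x08
#define VMREQTARGET 0x1000

struct rts_example { unsigned int p_rts_flags; };

static int is_runnable(const struct rts_example *p)
{
	return p->p_rts_flags == 0;	/* no blocking reasons left */
}

static int example_blocked_both(void)
{
	struct rts_example p = { RECEIVING | VMREQTARGET };
	p.p_rts_flags &= ~RECEIVING;	/* the receive completes... */
	return is_runnable(&p);		/* ...but still 0: VMREQTARGET remains */
}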
@ -117,33 +119,43 @@ struct proc {
/* Set flag and dequeue if the process was runnable. */
#define RTS_SET(rp, f) \
do { \
vmassert(intr_disabled()); \
if(!(rp)->p_rts_flags) { dequeue(rp); } \
(rp)->p_rts_flags |= (f); \
vmassert(intr_disabled()); \
} while(0)

/* Clear flag and enqueue if the process was not runnable but is now. */
#define RTS_UNSET(rp, f) \
do { \
int rts; \
rts = (rp)->p_rts_flags; \
vmassert(intr_disabled()); \
rts = (rp)->p_rts_flags; \
(rp)->p_rts_flags &= ~(f); \
if(rts && !(rp)->p_rts_flags) { enqueue(rp); } \
vmassert(intr_disabled()); \
} while(0)

/* Set flag and dequeue if the process was runnable. */
#define RTS_LOCK_SET(rp, f) \
do { \
if(!(rp)->p_rts_flags) { lock_dequeue(rp); } \
int u = 0; \
if(!intr_disabled()) { u = 1; lock; } \
if(!(rp)->p_rts_flags) { dequeue(rp); } \
(rp)->p_rts_flags |= (f); \
if(u) { unlock; } \
} while(0)

/* Clear flag and enqueue if the process was not runnable but is now. */
#define RTS_LOCK_UNSET(rp, f) \
do { \
int rts; \
rts = (rp)->p_rts_flags; \
int u = 0; \
if(!intr_disabled()) { u = 1; lock; } \
rts = (rp)->p_rts_flags; \
(rp)->p_rts_flags &= ~(f); \
if(rts && !(rp)->p_rts_flags) { lock_enqueue(rp); } \
if(rts && !(rp)->p_rts_flags) { enqueue(rp); } \
if(u) { unlock; } \
} while(0)

/* Set flags to this value. */

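The RTS_LOCK_* variants now take the lock only when interrupts are still enabled, remembering that fact in u so they release it again afterwards; this makes them callable from both locked and unlocked contexts. A self-contained sketch of the same conditional lock/unlock pattern; all names here are stand-ins.

/* Sketch of the conditional locking pattern used by the RTS_LOCK_* macros
 * above: take the lock only if the caller did not already hold it, and
 * release it only if we took it here. */
static int interrupts_disabled;	/* stand-in for the intr_disabled() state */

static void lock(void)   { interrupts_disabled = 1; }
static void unlock(void) { interrupts_disabled = 0; }

static void set_flag_locked(unsigned int *rts_flags, unsigned int f)
{
	int took_lock = 0;

	if (!interrupts_disabled) { took_lock = 1; lock(); }
	*rts_flags |= f;			/* the actual state change */
	if (took_lock) unlock();		/* restore the caller's context */
}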
@ -39,6 +39,7 @@ _PROTOTYPE( void lock_dequeue, (struct proc *rp) );
_PROTOTYPE( void enqueue, (struct proc *rp) );
_PROTOTYPE( void dequeue, (struct proc *rp) );
_PROTOTYPE( void balance_queues, (struct timer *tp) );
_PROTOTYPE( void schedcheck, (void) );
_PROTOTYPE( struct proc *endpoint_lookup, (endpoint_t ep) );
#if DEBUG_ENABLE_IPC_WARNINGS
_PROTOTYPE( int isokendpt_f, (char *file, int line, endpoint_t e, int *p, int f));
@ -162,7 +163,9 @@ _PROTOTYPE( int vm_checkrange, (struct proc *caller, struct proc *target,
vir_bytes start, vir_bytes length, int writeflag, int checkonly));
_PROTOTYPE( void proc_stacktrace, (struct proc *proc) );
_PROTOTYPE( int vm_lookup, (struct proc *proc, vir_bytes virtual, vir_bytes *result, u32_t *ptent));
_PROTOTYPE( int vm_suspend, (struct proc *caller, struct proc *target));
_PROTOTYPE( int vm_suspend, (struct proc *caller, struct proc *target,
phys_bytes lin, phys_bytes size, int wrflag, int type));
_PROTOTYPE( int delivermsg, (struct proc *target));
_PROTOTYPE( phys_bytes arch_switch_copymsg, (struct proc *rp, message *m,
phys_bytes lin));

@ -58,7 +58,8 @@ char *callnames[NR_SYS_CALLS];
FORWARD _PROTOTYPE( void initialize, (void));
FORWARD _PROTOTYPE( struct proc *vmrestart_check, (message *));

u32_t cr3_test, cr3_reload, createpde, linlincopies, physzero;
u32_t cr3_test, cr3_reload, newpde, overwritepde,
linlincopies, physzero, invlpgs, npagefaults, vmreqs, vmcheckranges, straightpdes;

/*===========================================================================*
* sys_task *
@ -87,21 +88,32 @@ PUBLIC void sys_task()
/* Get work. Block and wait until a request message arrives. */
if((r=receive(ANY, &m)) != OK)
minix_panic("receive() failed", r);
}
}

#if 1
{
static int prevu;
int u;
int u, dt;
u = get_uptime();
if(u/system_hz != prevu/system_hz) {
printf("cr3 tests: %5lu reloads: %5lu createpde: %5lu linlincopies: %5lu physzero: %5lu\n",
cr3_test, cr3_reload, createpde, linlincopies, physzero);
cr3_test = 1;
dt = u - prevu;
if(dt >= 5*system_hz) {
#define PERSEC(n) ((n)*system_hz/dt)
printf("%6d cr3 tests: %5lu cr3: %5lu straightpdes: %5lu newpde: %5lu overwritepde %5lu linlincopies: %5lu physzero: %5lu invlpgs: %5lu pagefaults: %5lu vmreq: %5lu vmcheckranges: %5lu\n",
u/system_hz,
PERSEC(cr3_test), PERSEC(cr3_reload),
PERSEC(straightpdes), PERSEC(newpde),
PERSEC(overwritepde),
PERSEC(linlincopies), PERSEC(physzero),
PERSEC(invlpgs), PERSEC(npagefaults),
PERSEC(vmreqs), PERSEC(vmcheckranges));
cr3_reload = 0;
createpde = linlincopies = physzero = 0;
cr3_test = 0;
newpde = overwritepde = linlincopies =
physzero = invlpgs = straightpdes = 0;
npagefaults = 0;
vmreqs = vmcheckranges = 0;
prevu = u;
}
prevu = u;
}
#endif

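PERSEC(n) converts a raw counter into an events-per-second rate: n events over dt clock ticks, scaled by system_hz ticks per second. For example, assuming system_hz is 60, a 5-second window is dt = 300 ticks, so a counter of 1500 reports 1500 * 60 / 300 = 300 per second. A self-contained sketch of the same computation as a function; the system_hz value is only an assumption for the example.

/* Sketch of the PERSEC(n) rate computation used above: n events observed
 * over dt clock ticks, with hz ticks per second. */
#include <stdio.h>

static unsigned long per_sec(unsigned long n, unsigned long dt, unsigned long hz)
{
	return n * hz / dt;	/* integer rate, truncated like the macro */
}

int main(void)
{
	unsigned long system_hz = 60;		/* assumed clock frequency */
	unsigned long dt = 5 * system_hz;	/* 5-second reporting window */

	printf("pagefaults/s: %lu\n", per_sec(1500, dt, system_hz));	/* prints 300 */
	return 0;
}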
@ -111,41 +123,13 @@ PUBLIC void sys_task()
okendpt(who_e, &who_p);
caller_ptr = proc_addr(who_p);

if (caller_ptr->p_endpoint == ipc_stats_target)
sys_stats.total= add64u(sys_stats.total, 1);

/* See if the caller made a valid request and try to handle it. */
if (call_nr < 0 || call_nr >= NR_SYS_CALLS) { /* check call number */
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("SYSTEM: illegal request %d from %d.\n",
call_nr,m.m_source);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
sys_stats.bad_req++;
result = EBADREQUEST; /* illegal message type */
}
else if (!GET_BIT(priv(caller_ptr)->s_k_call_mask, call_nr)) {
#if DEBUG_ENABLE_IPC_WARNINGS
static int curr= 0, limit= 100, extra= 20;

if (curr < limit+extra)
{
#if 0
kprintf("SYSTEM: request %d from %d denied.\n",
call_nr, m.m_source);
#else
FIXME("privileges bypassed");
#endif
} else if (curr == limit+extra)
{
kprintf("sys_task: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
sys_stats.not_allowed++;
result = ECALLDENIED; /* illegal message type */
}
else {
@ -157,15 +141,18 @@ PUBLIC void sys_task()
* until VM tells us it's allowed. VM has been notified
* and we must wait for its reply to restart the call.
*/
vmassert(RTS_ISSET(caller_ptr, VMREQUEST));
vmassert(caller_ptr->p_vmrequest.type == VMSTYPE_KERNELCALL);
memcpy(&caller_ptr->p_vmrequest.saved.reqmsg, &m, sizeof(m));
caller_ptr->p_vmrequest.type = VMSTYPE_SYS_MESSAGE;
} else if (result != EDONTREPLY) {
/* Send a reply, unless inhibited by a handler function.
* Use the kernel function lock_send() to prevent a system
* call trap.
*/
if(restarting)
RTS_LOCK_UNSET(restarting, VMREQUEST);
if(restarting) {
vmassert(!RTS_ISSET(restarting, VMREQUEST));
vmassert(!RTS_ISSET(restarting, VMREQTARGET));
}
m.m_type = result; /* report status of call */
if(WILLRECEIVE(caller_ptr, SYSTEM)) {
if (OK != (s=lock_send(m.m_source, &m))) {
@ -561,23 +548,18 @@ PRIVATE struct proc *vmrestart_check(message *m)
if(!(restarting = vmrestart))
return NULL;

if(restarting->p_rts_flags & SLOT_FREE)
minix_panic("SYSTEM: VMREQUEST set for empty process", NO_NUM);
vmassert(!RTS_ISSET(restarting, SLOT_FREE));
vmassert(RTS_ISSET(restarting, VMREQUEST));

type = restarting->p_vmrequest.type;
restarting->p_vmrequest.type = VMSTYPE_SYS_NONE;
vmrestart = restarting->p_vmrequest.nextrestart;

if(!RTS_ISSET(restarting, VMREQUEST))
minix_panic("SYSTEM: VMREQUEST not set for process on vmrestart queue",
restarting->p_endpoint);

switch(type) {
case VMSTYPE_SYS_MESSAGE:
case VMSTYPE_KERNELCALL:
memcpy(m, &restarting->p_vmrequest.saved.reqmsg, sizeof(*m));
if(m->m_source != restarting->p_endpoint)
minix_panic("SYSTEM: vmrestart source doesn't match",
NO_NUM);
restarting->p_vmrequest.saved.reqmsg.m_source = NONE;
vmassert(m->m_source == restarting->p_endpoint);
/* Original caller could've disappeared in the meantime. */
if(!isokendpt(m->m_source, &who_p)) {
kprintf("SYSTEM: ignoring call %d from dead %d\n",

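vmrestart_check() pops the first suspended kernel call off the singly linked vmrestart chain (linked through p_vmrequest.nextrestart) and hands its saved request message back to sys_task for replay. A self-contained sketch of that pop-and-replay step; the structs are cut-down stand-ins for the kernel's struct proc and message.

/* Sketch of popping a suspended request off the vmrestart chain and
 * recovering its saved message, as vmrestart_check() does above. */
#include <string.h>

struct req_msg { int m_type; int m_source; };
struct susp {
	struct susp *nextrestart;	/* next in vmrestart chain */
	struct req_msg saved_reqmsg;	/* request saved when it was suspended */
};

static struct susp *vmrestart;		/* head of the restart chain */

static int pop_restart(struct req_msg *m)
{
	struct susp *restarting;

	if (!(restarting = vmrestart))
		return 0;				/* nothing to restart */
	vmrestart = restarting->nextrestart;		/* unlink the head */
	memcpy(m, &restarting->saved_reqmsg, sizeof(*m));
	return 1;					/* caller replays *m */
}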
@ -63,19 +63,8 @@ register message *m_ptr; /* pointer to request message */
}
if (i >= nr_io_range)
{
static int curr= 0, limit= 100, extra= 20;

if (curr < limit+extra)
{
kprintf("do_devio: port 0x%x (size %d) not allowed\n",
m_ptr->DIO_PORT, size);
} else if (curr == limit+extra)
{
kprintf("do_devio: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return EPERM;
}
}
@ -83,19 +72,8 @@ register message *m_ptr; /* pointer to request message */
doit:
if (m_ptr->DIO_PORT & (size-1))
{
static int curr= 0, limit= 100, extra= 20;

if (curr < limit+extra)
{
kprintf("do_devio: unaligned port 0x%x (size %d)\n",
m_ptr->DIO_PORT, size);
} else if (curr == limit+extra)
{
kprintf("do_devio: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return EPERM;
}

@ -90,7 +90,7 @@ register message *m_ptr; /* pointer to request message */

/* Calculate endpoint identifier, so caller knows what it is. */
m_ptr->PR_ENDPT = rpc->p_endpoint;
m_ptr->PR_FORK_MSGADDR = (char *) rpp->p_delivermsg_lin;
m_ptr->PR_FORK_MSGADDR = (char *) rpp->p_delivermsg_vir;

/* Install new map */
r = newmap(rpc, map_ptr);

@ -8,6 +8,7 @@
*/

#include "../system.h"
#include "../vm.h"

#if USE_MEMSET

@ -61,22 +61,11 @@ endpoint_t *e_granter; /* new granter (magic grants) */
if(!HASGRANTTABLE(granter_proc)) return EPERM;

if(priv(granter_proc)->s_grant_entries <= grant) {
static int curr= 0, limit= 100, extra= 20;

if (curr < limit+extra)
{
kprintf(
"verify_grant: grant verify failed in ep %d proc %d: "
"grant %d out of range for table size %d\n",
granter, proc_nr, grant,
priv(granter_proc)->s_grant_entries);
} else if (curr == limit+extra)
{
kprintf("verify_grant: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return(EPERM);
}

@ -219,23 +208,9 @@ int access; /* CPF_READ for a copy from granter to grantee, CPF_WRITE
/* Verify permission exists. */
if((r=verify_grant(granter, grantee, grantid, bytes, access,
g_offset, &v_offset, &new_granter)) != OK) {
static int curr= 0, limit= 100, extra= 20;

if (curr < limit+extra)
{
#if 0
kprintf(
"grant %d verify to copy %d->%d by %d failed: err %d\n",
grantid, *src, *dst, grantee, r);
#endif
} else if (curr == limit+extra)
{
kprintf(
"do_safecopy`safecopy: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return r;
}

@ -13,6 +13,8 @@
#include <minix/type.h>
#include <minix/config.h>

extern int verifyrange;

/*===========================================================================*
* do_vmctl *
*===========================================================================*/
@ -21,7 +23,7 @@ register message *m_ptr; /* pointer to request message */
{
int proc_nr, i;
endpoint_t ep = m_ptr->SVMCTL_WHO;
struct proc *p, *rp;
struct proc *p, *rp, *target;

if(ep == SELF) { ep = m_ptr->m_source; }

@ -40,15 +42,25 @@ register message *m_ptr; /* pointer to request message */
/* Send VM the information about the memory request. */
if(!(rp = vmrequest))
return ESRCH;
if(!RTS_ISSET(rp, VMREQUEST))
minix_panic("do_vmctl: no VMREQUEST set", NO_NUM);
vmassert(RTS_ISSET(rp, VMREQUEST));

#if 0
printf("kernel: vm request sent by: %s / %d about %d; 0x%lx-0x%lx, wr %d\n",
printf("kernel: vm request sent by: %s / %d about %d; 0x%lx-0x%lx, wr %d, stack: %s ",
rp->p_name, rp->p_endpoint, rp->p_vmrequest.who,
rp->p_vmrequest.start,
rp->p_vmrequest.start + rp->p_vmrequest.length,
rp->p_vmrequest.writeflag);
rp->p_vmrequest.writeflag, rp->p_vmrequest.stacktrace);
printf("type %d\n", rp->p_vmrequest.type);
#endif

#if DEBUG_VMASSERT
okendpt(rp->p_vmrequest.who, &proc_nr);
target = proc_addr(proc_nr);
if(!RTS_ISSET(target, VMREQTARGET)) {
printf("set stack: %s\n", rp->p_vmrequest.stacktrace);
minix_panic("VMREQTARGET not set for target",
NO_NUM);
}
#endif

/* Reply with request fields. */
@ -56,6 +68,7 @@ register message *m_ptr; /* pointer to request message */
m_ptr->SVMCTL_MRG_LEN = rp->p_vmrequest.length;
m_ptr->SVMCTL_MRG_WRITE = rp->p_vmrequest.writeflag;
m_ptr->SVMCTL_MRG_EP = rp->p_vmrequest.who;
m_ptr->SVMCTL_MRG_REQUESTOR = (void *) rp->p_endpoint;
rp->p_vmrequest.vmresult = VMSUSPEND;

/* Remove from request chain. */

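The VMCTL_MEMREQ_GET reply now also carries the requestor's endpoint, packed into the message's m1_p2 pointer slot (SVMCTL_MRG_REQUESTOR) with a cast; the library wrapper later casts it back to endpoint_t. A minimal sketch of that pack/unpack round trip with a stand-in message type; the kernel casts directly because endpoints and pointers have the same width on the 32-bit build, while the sketch goes through intptr_t to stay portable.

/* Sketch of carrying an integer endpoint in a pointer-typed message field,
 * as SVMCTL_MRG_REQUESTOR does above. The message struct is a stand-in for
 * MINIX's fixed-layout message. */
#include <stdint.h>

typedef int endpoint_t;

struct fake_message { void *m1_p2; };

static void pack_requestor(struct fake_message *m, endpoint_t ep)
{
	m->m1_p2 = (void *)(intptr_t) ep;	/* kernel side: endpoint into pointer */
}

static endpoint_t unpack_requestor(const struct fake_message *m)
{
	return (endpoint_t)(intptr_t) m->m1_p2;	/* library side: pointer back to endpoint */
}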
@ -63,41 +76,84 @@ register message *m_ptr; /* pointer to request message */

return OK;
case VMCTL_MEMREQ_REPLY:
if(!(rp = p->p_vmrequest.requestor))
minix_panic("do_vmctl: no requestor set", ep);
p->p_vmrequest.requestor = NULL;
if(!RTS_ISSET(rp, VMREQUEST))
minix_panic("do_vmctl: no VMREQUEST set", ep);
if(rp->p_vmrequest.vmresult != VMSUSPEND)
minix_panic("do_vmctl: result not VMSUSPEND set",
rp->p_vmrequest.vmresult);
rp->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
if(rp->p_vmrequest.vmresult == VMSUSPEND)
minix_panic("VM returned VMSUSPEND?", NO_NUM);
if(rp->p_vmrequest.vmresult != OK)
vmassert(RTS_ISSET(p, VMREQUEST));
vmassert(p->p_vmrequest.vmresult == VMSUSPEND);
okendpt(p->p_vmrequest.who, &proc_nr);
target = proc_addr(proc_nr);
p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
vmassert(p->p_vmrequest.vmresult != VMSUSPEND);
if(p->p_vmrequest.vmresult != OK)
kprintf("SYSTEM: VM replied %d to mem request\n",
rp->p_vmrequest.vmresult);
p->p_vmrequest.vmresult);

/* Put on restart chain. */
rp->p_vmrequest.nextrestart = vmrestart;
vmrestart = rp;

#if 0
printf("memreq reply: vm request sent by: %s / %d about %d; 0x%lx-0x%lx, wr %d, stack: %s ",
p->p_name, p->p_endpoint, p->p_vmrequest.who,
p->p_vmrequest.start,
p->p_vmrequest.start + p->p_vmrequest.length,
p->p_vmrequest.writeflag, p->p_vmrequest.stacktrace);
printf("type %d\n", p->p_vmrequest.type);
#endif

#if DEBUG_VMASSERT
{
vmassert(target->p_rts_flags);

/* Sanity check. */
if(rp->p_vmrequest.vmresult == OK) {
if(CHECKRANGE(p,
rp->p_vmrequest.start,
rp->p_vmrequest.length,
rp->p_vmrequest.writeflag) != OK) {
kprintf("SYSTEM: request %d:0x%lx-0x%lx, wrflag %d, failed\n",
rp->p_endpoint,
rp->p_vmrequest.start, rp->p_vmrequest.start + rp->p_vmrequest.length,
rp->p_vmrequest.writeflag);
if(p->p_vmrequest.vmresult == OK) {
int r;
vmassert(!verifyrange);
verifyrange = 1;
r = CHECKRANGE(target,
p->p_vmrequest.start,
p->p_vmrequest.length,
p->p_vmrequest.writeflag);
vmassert(verifyrange);
verifyrange = 0;

if(r != OK) {

kprintf("SYSTEM: request by %d: on ep %d: 0x%lx-0x%lx, wrflag %d, stack %s, failed\n",
p->p_endpoint, target->p_endpoint,
p->p_vmrequest.start, p->p_vmrequest.start + p->p_vmrequest.length,
p->p_vmrequest.writeflag,
p->p_vmrequest.stacktrace);

printf("printing pt of %d (0x%lx)\n",
vm_print(target->p_endpoint),
target->p_seg.p_cr3
);
vm_print(target->p_seg.p_cr3);
minix_panic("SYSTEM: fail but VM said OK", NO_NUM);
}
}
}
}
}
#endif

vmassert(RTS_ISSET(target, VMREQTARGET));
RTS_LOCK_UNSET(target, VMREQTARGET);

if(p->p_vmrequest.type == VMSTYPE_KERNELCALL) {
/* Put on restart chain. */
p->p_vmrequest.nextrestart = vmrestart;
vmrestart = p;
} else if(p->p_vmrequest.type == VMSTYPE_DELIVERMSG) {
vmassert(p->p_misc_flags & MF_DELIVERMSG);
vmassert(p == target);
vmassert(RTS_ISSET(p, VMREQUEST));
vmassert(RTS_ISSET(p, VMREQTARGET));
RTS_LOCK_UNSET(p, VMREQUEST);
RTS_LOCK_UNSET(target, VMREQTARGET);
} else {
#if DEBUG_VMASSERT
printf("suspended with stack: %s\n",
p->p_vmrequest.stacktrace);
#endif
minix_panic("strange request type",
p->p_vmrequest.type);
}

return OK;
case VMCTL_ENABLE_PAGING:
if(vm_running)
@ -105,8 +161,13 @@ kprintf("SYSTEM: request %d:0x%lx-0x%lx, wrflag %d, failed\n",
vm_init(p);
if(!vm_running)
minix_panic("do_vmctl: paging enabling failed", NO_NUM);
vmassert(p->p_delivermsg_lin ==
umap_local(p, D, p->p_delivermsg_vir, sizeof(message)));
if(newmap(p, m_ptr->SVMCTL_VALUE) != OK)
minix_panic("do_vmctl: newmap failed", NO_NUM);
p->p_delivermsg_lin =
umap_local(p, D, p->p_delivermsg_vir, sizeof(message));
vmassert(p->p_delivermsg_lin);
return OK;
}

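Once VM has answered, the kernel clears VMREQTARGET on the target and then decides how to resume the requestor based on p_vmrequest.type: a suspended kernel call goes back on the vmrestart chain for replay, while a suspended message delivery only has its VMREQUEST/VMREQTARGET bits cleared so the delivery can be retried. A compact sketch of that dispatch; the VMSTYPE_* values mirror the defines added in proc.h, and the two handlers are placeholder names for what the code above does inline.

/* Sketch of the post-reply dispatch above. requeue_kernelcall() and
 * retry_delivery() are placeholders for the inline kernel code
 * (push on vmrestart vs. clear the RTS bits and redeliver). */
#define VMSTYPE_KERNELCALL 1
#define VMSTYPE_DELIVERMSG 2

static void requeue_kernelcall(int proc_nr) { (void)proc_nr; /* push on restart chain */ }
static void retry_delivery(int proc_nr)     { (void)proc_nr; /* clear bits, redeliver */ }

static int resume_after_vm_reply(int type, int proc_nr)
{
	switch (type) {
	case VMSTYPE_KERNELCALL:
		requeue_kernelcall(proc_nr);
		return 0;
	case VMSTYPE_DELIVERMSG:
		retry_delivery(proc_nr);
		return 0;
	default:
		return -1;	/* the kernel would panic: strange request type */
	}
}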
@ -35,7 +35,7 @@

/* Define stack sizes for the kernel tasks included in the system image. */
#define NO_STACK 0
#define SMALL_STACK (256 * sizeof(char *))
#define SMALL_STACK (1024 * sizeof(char *))
#define IDL_S SMALL_STACK /* 3 intr, 3 temps, 4 db for Intel */
#define HRD_S NO_STACK /* dummy task, uses kernel stack */
#define TSK_S SMALL_STACK /* system and clock task */

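The SMALL_STACK change quadruples the per-task kernel stacks: with 4-byte pointers, as on the 32-bit i386 build this diff targets, 256 * sizeof(char *) is 1024 bytes and 1024 * sizeof(char *) is 4096 bytes. A one-line check of that arithmetic:

/* Quick check of the stack-size arithmetic; the 4-byte pointer size is an
 * assumption that holds on the 32-bit kernel. */
#include <stdio.h>

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
		256 * sizeof(char *), 1024 * sizeof(char *));
	/* With sizeof(char *) == 4 this prints: old: 1024 bytes, new: 4096 bytes */
	return 0;
}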
@ -43,7 +43,7 @@ PUBLIC int sys_vmctl_get_cr3_i386(endpoint_t who, u32_t *cr3)
}

PUBLIC int sys_vmctl_get_memreq(endpoint_t *who, vir_bytes *mem,
vir_bytes *len, int *wrflag)
vir_bytes *len, int *wrflag, endpoint_t *requestor)
{
message m;
int r;
@ -56,7 +56,16 @@ PUBLIC int sys_vmctl_get_memreq(endpoint_t *who, vir_bytes *mem,
*mem = (vir_bytes) m.SVMCTL_MRG_ADDR;
*len = m.SVMCTL_MRG_LEN;
*wrflag = m.SVMCTL_MRG_WRITE;
*requestor = (endpoint_t) m.SVMCTL_MRG_REQUESTOR;
}
return r;
}

PUBLIC int sys_vmctl_enable_paging(struct mem_map *map)
{
message m;
m.SVMCTL_WHO = SELF;
m.SVMCTL_PARAM = VMCTL_ENABLE_PAGING;
m.SVMCTL_VALUE = (int) map;
return _taskcall(SYSTASK, SYS_VMCTL, &m);
}

@ -290,8 +290,11 @@ PRIVATE void get_work()
continue;
}
if(who_p >= 0 && fproc[who_p].fp_endpoint != who_e) {
printf("FS: receive endpoint inconsistent (%d, %d, %d).\n",
who_e, fproc[who_p].fp_endpoint, who_e);
if(fproc[who_p].fp_endpoint == NONE) {
printf("slot unknown even\n");
}
printf("FS: receive endpoint inconsistent (source %d, who_p %d, stored ep %d, who_e %d).\n",
m_in.m_source, who_p, fproc[who_p].fp_endpoint, who_e);
#if 0
panic(__FILE__, "FS: inconsistent endpoint ", NO_NUM);
#endif

@ -145,9 +145,12 @@ PUBLIC int do_fork(message *msg)
}

if(fullvm) {
if(handle_memory(vmc, msgaddr, sizeof(message), 1) != OK)
vir_bytes vir;
vir = arch_vir2map(vmc, msgaddr);
if(handle_memory(vmc, vir, sizeof(message), 1) != OK)
vm_panic("can't make message writable (child)", NO_NUM);
if(handle_memory(vmp, msgaddr, sizeof(message), 1) != OK)
vir = arch_vir2map(vmp, msgaddr);
if(handle_memory(vmp, vir, sizeof(message), 1) != OK)
vm_panic("can't make message writable (parent)", NO_NUM);
if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
vm_panic("fork can't pt_bind", r);

@ -719,10 +719,8 @@ PUBLIC void pt_init(void)
pt_bind(newpt, vmp);

/* Now actually enable paging. */
if((r=sys_vmctl(SELF, VMCTL_ENABLE_PAGING,
vmp->vm_arch.vm_seg)) != OK) {
vm_panic("VMCTL_ENABLE_PAGING failed", r);
}
if(sys_vmctl_enable_paging(vmp->vm_arch.vm_seg) != OK)
vm_panic("pt_init: enable paging failed", NO_NUM);

/* Back to reality - this is where the stack actually is. */
vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;

@ -118,6 +118,8 @@ PUBLIC int main(void)
* verified, and/or pagefaults handled.
*/
do_memory();
break;
case HARDWARE:
do_pagefaults();
break;
case PM_PROC_NR:

@ -61,6 +61,11 @@ PUBLIC void do_pagefaults(void)
vir_bytes offset;
int p, wr = PFERR_WRITE(err);

#if 0
printf("VM: pagefault: ep %d 0x%lx %s\n",
ep, arch_map2vir(vmp, addr), pf_errstr(err));
#endif

if(vm_isokendpt(ep, &p) != OK)
vm_panic("do_pagefaults: endpoint wrong", ep);

@ -104,9 +109,11 @@ PUBLIC void do_pagefaults(void)
vm_panic("sys_kill failed", s);
continue;
}

#if 0
printf("VM: map_pf done; ep %d 0x%lx %s\n",
ep, arch_map2vir(vmp, addr), pf_errstr(err));

printf("VM: handling pagefault OK: %d addr 0x%lx %s\n",
ep, arch_map2vir(vmp, addr), pf_errstr(err));
#endif

@ -126,12 +133,13 @@ PUBLIC void do_pagefaults(void)
PUBLIC void do_memory(void)
{
int r, s;
endpoint_t who;
endpoint_t who, requestor;
vir_bytes mem;
vir_bytes len;
int wrflag;

while((r=sys_vmctl_get_memreq(&who, &mem, &len, &wrflag)) == OK) {
while((r=sys_vmctl_get_memreq(&who, &mem, &len, &wrflag, &requestor))
== OK) {
int p, r = OK;
struct vmproc *vmp;

@ -141,7 +149,7 @@ PUBLIC void do_memory(void)

r = handle_memory(vmp, mem, len, wrflag);

if(sys_vmctl(who, VMCTL_MEMREQ_REPLY, r) != OK)
if(sys_vmctl(requestor, VMCTL_MEMREQ_REPLY, r) != OK)
vm_panic("do_memory: sys_vmctl failed", r);

#if 0
@ -170,6 +178,7 @@ int handle_memory(struct vmproc *vmp, vir_bytes mem, vir_bytes len, int wrflag)
if(o > 0) len += VM_PAGE_SIZE - o;

if(!(region = map_lookup(vmp, mem))) {
map_printmap(vmp);
printf("VM: do_memory: memory doesn't exist\n");
r = EFAULT;
} else if(mem + len > region->vaddr + region->length) {
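handle_memory() rounds the requested range out to whole pages before looking up the region: the visible line extends len up to the next VM_PAGE_SIZE boundary, and the start of the range is presumably aligned down the same way in the lines not shown by this hunk. A self-contained sketch of that rounding; VM_PAGE_SIZE is set to 4096 only for the example, and the start-alignment step is an assumption about the elided code.

/* Sketch of rounding a (mem, len) request out to whole pages. Only the
 * final "len += VM_PAGE_SIZE - o" adjustment appears in the hunk above;
 * aligning mem down first is an assumption about the elided lines. */
#define VM_PAGE_SIZE 4096UL

static void page_round(unsigned long *mem, unsigned long *len)
{
	unsigned long o;

	o = *mem % VM_PAGE_SIZE;	/* offset into the first page */
	*mem -= o;			/* align the start down... */
	*len += o;			/* ...and keep the end in place */

	o = *len % VM_PAGE_SIZE;
	if (o > 0) *len += VM_PAGE_SIZE - o;	/* round the length up to a page */
}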