- code shared with exec(), letting boot-time processes have their own
  fully fledged virtual address space and freeing their pre-allocated
  heap+stack area (necessary to let the memory driver map in arbitrary
  areas of memory for /dev/mem without sys_vm_map)
- small optimization: preallocate memory on exec
- finished VR_DIRECT physical mapping code
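Both the exec path and the boot-time path now go through a single proc_new() routine (defined in the exec.c hunk below, with its prototype added to proto.h): exec_newmem() passes 0,0 for the text/data start so fresh anonymous memory gets allocated, while vm_init() hands in the text and data memory a boot process already owns. A rough model of the two call shapes, with made-up addresses and the struct vmproc pointer replaced by a plain label, purely for illustration:

#include <stdio.h>

typedef unsigned long phys_bytes;

/* Stub with the same parameter list as VM's new proc_new() (see the proto.h
 * hunk below), except that the struct vmproc pointer is replaced by a label.
 * All numbers in main() are made up; this only illustrates the call shapes.
 */
static int proc_new(const char *who, phys_bytes vstart,
	phys_bytes text_bytes, phys_bytes data_bytes,
	phys_bytes stack_bytes, phys_bytes gap_bytes,
	phys_bytes text_start, phys_bytes data_start, phys_bytes stacktop)
{
	printf("%s: vstart 0x%lx text 0x%lx data 0x%lx stack 0x%lx gap 0x%lx stacktop 0x%lx\n",
		who, vstart, text_bytes, data_bytes, stack_bytes, gap_bytes,
		stacktop);
	if(text_start || data_start)
		printf("%s: reuse preallocated text 0x%lx, data 0x%lx\n",
			who, text_start, data_start);
	else
		printf("%s: allocate fresh anonymous memory\n", who);
	return 0;
}

int main(void)
{
	/* exec_newmem() style: nothing preallocated, regular stack top. */
	proc_new("exec", 0x1000000, 0x6000, 0xa000, 0x1000, 0x10000,
		0, 0, 0x10000000);

	/* vm_init() style for a boot process: hand over the text and data
	 * memory it already owns, plus a one-page BASICSTACK. */
	proc_new("boot", 0x1000000, 0x6000, 0xa000, 0x1000, 0x24000,
		0x400000, 0x406000, 0x10000000);
	return 0;
}

The real routine additionally carves out the gap and stack regions, tags the heap for brk(), and re-binds the page table; see the proc_new() hunk further down.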
@@ -127,7 +127,7 @@ vir_bytes sp;			/* new value of sp */
	if(old_clicks < data_clicks) {
		vir_bytes more;
		more = (data_clicks - old_clicks) << CLICK_SHIFT;
		if(map_region_extend(rmp->vm_heap, more) != OK) {
		if(map_region_extend(rmp, rmp->vm_heap, more) != OK) {
			printf("VM: brk: map_region_extend failed\n");
			return ENOMEM;
		}

@@ -32,7 +32,6 @@
FORWARD _PROTOTYPE( int new_mem, (struct vmproc *vmp, struct vmproc *sh_vmp,
	vir_bytes text_bytes, vir_bytes data_bytes, vir_bytes bss_bytes,
	vir_bytes stk_bytes, phys_bytes tot_bytes)	);
FORWARD _PROTOTYPE( u32_t find_kernel_top, (void));

/*===========================================================================*
 *                              find_share                                   *
@@ -168,7 +167,6 @@ phys_bytes tot_bytes;		/* total memory to allocate, including gap */
  vir_clicks text_clicks, data_clicks, gap_clicks, stack_clicks, tot_clicks;
  phys_bytes bytes, base, bss_offset;
  int s, r2;
  static u32_t kernel_top = 0;

  SANITYCHECK(SCL_FUNCTIONS);

@@ -218,108 +216,20 @@ SANITYCHECK(SCL_DETAIL);
   */

  if(vm_paged) {
	vir_bytes hole_clicks;

	if(pt_new(&rmp->vm_pt) != OK)
		vm_panic("exec_newmem: no new pagetable", NO_NUM);

	SANITYCHECK(SCL_DETAIL);

	if(!map_proc_kernel(rmp)) {
		printf("VM: exec: map_proc_kernel failed\n");
		return ENOMEM;
	}

	if(!kernel_top)
		kernel_top = find_kernel_top();

	/* Place text at kernel top. */
	rmp->vm_arch.vm_seg[T].mem_phys = kernel_top;
	rmp->vm_arch.vm_seg[T].mem_vir = 0;
	rmp->vm_arch.vm_seg[T].mem_len = text_clicks;

	rmp->vm_offset = CLICK2ABS(kernel_top);

	vm_assert(!sh_mp);
	/* page mapping flags for code */
#define TEXTFLAGS (PTF_PRESENT | PTF_USER | PTF_WRITE)
	proc_new(rmp,
	 kernel_top_bytes,	/* where to start the process in the page table */
	 CLICK2ABS(text_clicks),/* how big is the text in bytes, page-aligned */
	 CLICK2ABS(data_clicks),/* how big is data+bss, page-aligned */
	 CLICK2ABS(stack_clicks),/* how big is stack, page-aligned */
	 CLICK2ABS(gap_clicks),	/* how big is gap, page-aligned */
	 0,0,			/* not preallocated */
	 VM_STACKTOP		/* regular stack top */
	 );
	SANITYCHECK(SCL_DETAIL);
	if(text_clicks > 0) {
		if(!map_page_region(rmp, CLICK2ABS(kernel_top), 0,
		  CLICK2ABS(rmp->vm_arch.vm_seg[T].mem_len), 0,
		  VR_ANON | VR_WRITABLE, 0)) {
			SANITYCHECK(SCL_DETAIL);
			printf("VM: map_page_region failed (text)\n");
			return(ENOMEM);
		}
		SANITYCHECK(SCL_DETAIL);
	}
	SANITYCHECK(SCL_DETAIL);

	/* Allocate memory for data (including bss, but not including gap
	 * or stack), make sure it's cleared, and map it in after text
	 * (if any).
	 */
	if(!(rmp->vm_heap = map_page_region(rmp,
	  CLICK2ABS(kernel_top + text_clicks), 0,
	  CLICK2ABS(data_clicks), 0, VR_ANON | VR_WRITABLE, 0))) {
		printf("VM: exec: map_page_region for data failed\n");
		return ENOMEM;
	}

	map_region_set_tag(rmp->vm_heap, VRT_HEAP);

	/* How many address space clicks between end of data
	 * and start of stack?
	 * VM_STACKTOP is the first address after the stack, as addressed
	 * from within the user process.
	 */
	hole_clicks = VM_STACKTOP >> CLICK_SHIFT;
	hole_clicks -= data_clicks + stack_clicks + gap_clicks;

	if(!map_page_region(rmp,
	  CLICK2ABS(kernel_top + text_clicks + data_clicks + hole_clicks),
	  0, CLICK2ABS(stack_clicks+gap_clicks), 0,
	  VR_ANON | VR_WRITABLE, 0) != OK) {
	  	vm_panic("map_page_region failed for stack", NO_NUM);
	}

	rmp->vm_arch.vm_seg[D].mem_phys = kernel_top + text_clicks;
	rmp->vm_arch.vm_seg[D].mem_vir = 0;
	rmp->vm_arch.vm_seg[D].mem_len = data_clicks;


	rmp->vm_arch.vm_seg[S].mem_phys = kernel_top +
		text_clicks + data_clicks + gap_clicks + hole_clicks;
	rmp->vm_arch.vm_seg[S].mem_vir = data_clicks + gap_clicks + hole_clicks;

	/* Pretend the stack is the full size of the data segment, so
	 * we get a full-sized data segment, up to VM_DATATOP.
	 * After sys_newmap(),, change the stack to what we know the
	 * stack to be (up to VM_STACKTOP).
	 */
	rmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
		rmp->vm_arch.vm_seg[S].mem_vir - kernel_top - text_clicks;

	/* Where are we allowed to start using the rest of the virtual
	 * address space?
	 */
	rmp->vm_stacktop = VM_STACKTOP;

	/* What is the final size of the data segment in bytes? */
	rmp->vm_arch.vm_data_top =
		(rmp->vm_arch.vm_seg[S].mem_vir +
		rmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

	rmp->vm_flags |= VMF_HASPT;

	if((s=sys_newmap(rmp->vm_endpoint, rmp->vm_arch.vm_seg)) != OK) {
		vm_panic("sys_newmap (vm) failed", s);
	}


	/* This is the real stack clicks. */
	rmp->vm_arch.vm_seg[S].mem_len = stack_clicks;

  } else {
  	phys_clicks new_base;

@@ -380,11 +290,11 @@ SANITYCHECK(SCL_DETAIL);
	  if ((s=sys_memset(0, base, bytes)) != OK) {
		vm_panic("new_mem can't zero", s);
	  }
  }

  /* Whether vm_pt is NULL or a new pagetable, tell kernel about it. */
  if((s=pt_bind(&rmp->vm_pt, rmp)) != OK)
	vm_panic("exec_newmem: pt_bind failed", s);
	  /* Tell kernel this thing has no page table. */
	  if((s=pt_bind(NULL, rmp)) != OK)
		vm_panic("exec_newmem: pt_bind failed", s);
  }

SANITYCHECK(SCL_FUNCTIONS);

@@ -394,7 +304,7 @@ SANITYCHECK(SCL_FUNCTIONS);
/*===========================================================================*
 *				find_kernel_top				     *
 *===========================================================================*/
PRIVATE u32_t find_kernel_top(void)
PUBLIC phys_bytes find_kernel_top(void)
{
/* Find out where the kernel is, so we know where to start mapping
 * user processes.
@@ -408,6 +318,128 @@ PRIVATE u32_t find_kernel_top(void)
	kernel_top = MAX(kernel_top, MEMTOP(VMP_SYSTEM, S));
	vm_assert(kernel_top);

	return kernel_top;
	return CLICK2ABS(kernel_top);
}

/*===========================================================================*
 *				proc_new				     *
 *===========================================================================*/
PUBLIC int proc_new(struct vmproc *vmp,
  phys_bytes vstart,	  /* where to start the process in page table */
  phys_bytes text_bytes,  /* how much code, in bytes but page aligned */
  phys_bytes data_bytes,  /* how much data + bss, in bytes but page aligned */
  phys_bytes stack_bytes, /* stack space to reserve, in bytes, page aligned */
  phys_bytes gap_bytes,   /* gap bytes, page aligned */
  phys_bytes text_start,  /* text starts here, if preallocated, otherwise 0 */
  phys_bytes data_start,  /* data starts here, if preallocated, otherwise 0 */
  phys_bytes stacktop
)
{
	int s;
	vir_bytes hole_bytes;
	int prealloc;

	vm_assert(!(vstart % VM_PAGE_SIZE));
	vm_assert(!(text_bytes % VM_PAGE_SIZE));
	vm_assert(!(data_bytes % VM_PAGE_SIZE));
	vm_assert(!(stack_bytes % VM_PAGE_SIZE));
	vm_assert(!(gap_bytes % VM_PAGE_SIZE));
	vm_assert(!(text_start % VM_PAGE_SIZE));
	vm_assert(!(data_start % VM_PAGE_SIZE));
	vm_assert((!text_start && !data_start) || (text_start && data_start));

	if(!map_proc_kernel(vmp)) {
		printf("VM: exec: map_proc_kernel failed\n");
		return ENOMEM;
	}

	/* Place text at start of process. */
	vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(vstart);
	vmp->vm_arch.vm_seg[T].mem_vir = 0;
	vmp->vm_arch.vm_seg[T].mem_len = ABS2CLICK(text_bytes);

	vmp->vm_offset = vstart;

	/* page mapping flags for code */
#define TEXTFLAGS (PTF_PRESENT | PTF_USER | PTF_WRITE)
	SANITYCHECK(SCL_DETAIL);
	if(text_bytes > 0) {
		if(!map_page_region(vmp, vstart, 0, text_bytes,
		  text_start ? text_start : MAP_NONE,
		  VR_ANON | VR_WRITABLE, text_start ? 0 : MF_PREALLOC)) {
			SANITYCHECK(SCL_DETAIL);
			printf("VM: proc_new: map_page_region failed (text)\n");
			return(ENOMEM);
		}
		SANITYCHECK(SCL_DETAIL);
	}
	SANITYCHECK(SCL_DETAIL);

	/* Allocate memory for data (including bss, but not including gap
	 * or stack), make sure it's cleared, and map it in after text
	 * (if any).
	 */
	if(!(vmp->vm_heap = map_page_region(vmp, vstart + text_bytes, 0,
	  data_bytes, data_start ? data_start : MAP_NONE, VR_ANON | VR_WRITABLE,
		data_start ? 0 : MF_PREALLOC))) {
		printf("VM: exec: map_page_region for data failed\n");
		return ENOMEM;
	}

	/* Tag the heap so brk() call knows which region to extend. */
	map_region_set_tag(vmp->vm_heap, VRT_HEAP);

	/* How many address space clicks between end of data
	 * and start of stack?
	 * stacktop is the first address after the stack, as addressed
	 * from within the user process.
	 */
	hole_bytes = stacktop - data_bytes - stack_bytes - gap_bytes;

	if(!map_page_region(vmp, vstart + text_bytes + data_bytes + hole_bytes,
	  0, stack_bytes + gap_bytes, MAP_NONE,
	  VR_ANON | VR_WRITABLE, 0) != OK) {
	  	vm_panic("map_page_region failed for stack", NO_NUM);
	}

	vmp->vm_arch.vm_seg[D].mem_phys = ABS2CLICK(vstart + text_bytes);
	vmp->vm_arch.vm_seg[D].mem_vir = 0;
	vmp->vm_arch.vm_seg[D].mem_len = ABS2CLICK(data_bytes);

	vmp->vm_arch.vm_seg[S].mem_phys = ABS2CLICK(vstart +
		text_bytes + data_bytes + gap_bytes + hole_bytes);
	vmp->vm_arch.vm_seg[S].mem_vir = ABS2CLICK(data_bytes + gap_bytes + hole_bytes);

	/* Pretend the stack is the full size of the data segment, so
	 * we get a full-sized data segment, up to VM_DATATOP.
	 * After sys_newmap(), change the stack to what we know the
	 * stack to be (up to stacktop).
	 */
	vmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
		vmp->vm_arch.vm_seg[S].mem_vir - ABS2CLICK(vstart) - ABS2CLICK(text_bytes);

	/* Where are we allowed to start using the rest of the virtual
	 * address space?
	 */
	vmp->vm_stacktop = stacktop;

	/* What is the final size of the data segment in bytes? */
	vmp->vm_arch.vm_data_top =
		(vmp->vm_arch.vm_seg[S].mem_vir +
		vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

	vmp->vm_flags |= VMF_HASPT;

	if((s=sys_newmap(vmp->vm_endpoint, vmp->vm_arch.vm_seg)) != OK) {
		vm_panic("sys_newmap (vm) failed", s);
	}


	/* This is the real stack clicks. */
	vmp->vm_arch.vm_seg[S].mem_len = ABS2CLICK(stack_bytes);

	if((s=pt_bind(&vmp->vm_pt, vmp)) != OK)
		vm_panic("exec_newmem: pt_bind failed", s);

	return OK;
}

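The hole_bytes arithmetic in proc_new() is easiest to see with numbers: the data region is placed directly after text, and the gap+stack region is pushed up so that the stack ends exactly at stacktop as seen from inside the process. A minimal standalone sketch of the same computation, using made-up sizes and stand-in constants (PAGE, STACKTOP and the 16 MB vstart are illustrative, not the real MINIX values):

#include <stdio.h>

/* Illustrative values only; the real constants come from the MINIX headers. */
#define PAGE		4096UL
#define STACKTOP	(400UL * 1024 * 1024)	/* stand-in for VM_STACKTOP */

int main(void)
{
	unsigned long vstart      = 16UL * 1024 * 1024;	/* stand-in for kernel_top_bytes */
	unsigned long text_bytes  = 24 * PAGE;
	unsigned long data_bytes  = 40 * PAGE;
	unsigned long stack_bytes = PAGE;		/* BASICSTACK in the boot case */
	unsigned long gap_bytes   = 16 * PAGE;

	/* Same arithmetic as proc_new(): the hole is whatever is left between
	 * the end of data and the start of the gap+stack region, so that the
	 * stack ends exactly at stacktop (process-relative).
	 */
	unsigned long hole_bytes = STACKTOP - data_bytes - stack_bytes - gap_bytes;

	printf("text  at 0x%lx\n", vstart);
	printf("data  at 0x%lx\n", vstart + text_bytes);
	printf("stack+gap at 0x%lx, ends at 0x%lx (= vstart + text + stacktop)\n",
		vstart + text_bytes + data_bytes + hole_bytes,
		vstart + text_bytes + data_bytes + hole_bytes + gap_bytes + stack_bytes);
	return 0;
}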
@@ -26,4 +26,4 @@ int verbosealloc;

/* vm operation mode state and values */
EXTERN long vm_paged;

EXTERN phys_bytes kernel_top_bytes;

@@ -15,12 +15,15 @@
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/const.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>

#include <memory.h>

#define _MAIN 1
#include "glo.h"
#include "proto.h"
@@ -33,9 +36,17 @@
#include "../../kernel/config.h"
#include "../../kernel/proc.h"

typedef u32_t mask_t;
#define MINEPM 0
#define MAXMASK (sizeof(mask_t)*8)
#define ANYEPM (MINEPM+MAXMASK-1)
#define MAXEPM (ANYEPM-1)
#define EPM(e) ((1L) << ((e)-MINEPM))
#define EPMOK(mask, ep) (((mask) & EPM(ANYEPM)) || ((ep) >= MINEPM && (ep) <= MAXEPM && (EPM(ep) & (mask))))

/* Table of calls and a macro to test for being in range. */
struct {
	endpoint_t vmc_caller;		/* Process that does this, or ANY */
	mask_t vmc_callers;		/* bitmap of endpoint numbers */
	int (*vmc_func)(message *);	/* Call handles message. */
	char *vmc_name;			/* Human-readable string. */
} vm_calls[VM_NCALLS];
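The single vmc_caller endpoint is on its way out here: the new vmc_callers bitmap lets one call be registered for several callers, with the ANYEPM bit acting as a wildcard. A minimal sketch of how the mask behaves, reusing the macros from the hunk above with made-up endpoint numbers (the real ones are TTY_PROC_NR, MEM_PROC_NR and so on):

#include <stdio.h>
#include <stdint.h>

/* Macros copied from the hunk above; mask_t is u32_t in VM. */
typedef uint32_t mask_t;
#define MINEPM 0
#define MAXMASK (sizeof(mask_t)*8)
#define ANYEPM (MINEPM+MAXMASK-1)
#define MAXEPM (ANYEPM-1)
#define EPM(e) ((1L) << ((e)-MINEPM))
#define EPMOK(mask, ep) (((mask) & EPM(ANYEPM)) || ((ep) >= MINEPM && (ep) <= MAXEPM && (EPM(ep) & (mask))))

int main(void)
{
	/* Stand-in endpoint numbers, not the real MINIX ones. */
	int tty = 4, mem = 5, other = 9;

	/* Registering the same call for two callers ORs their bits together,
	 * which is what repeated CALLMAP() invocations do. */
	mask_t map_phys_callers = 0;
	map_phys_callers |= EPM(tty);
	map_phys_callers |= EPM(mem);

	/* A call registered with ANYEPM is allowed from any endpoint. */
	mask_t mmap_callers = EPM(ANYEPM);

	printf("tty may VM_MAP_PHYS:   %d\n", EPMOK(map_phys_callers, tty));	/* 1 */
	printf("mem may VM_MAP_PHYS:   %d\n", EPMOK(map_phys_callers, mem));	/* 1 */
	printf("other may VM_MAP_PHYS: %d\n", EPMOK(map_phys_callers, other));	/* 0 */
	printf("other may VM_MMAP:     %d\n", EPMOK(mmap_callers, other));	/* 1 */
	return 0;
}

Registering the same call twice, as the table below does for VM_MAP_PHYS with TTY_PROC_NR and MEM_PROC_NR, simply ORs another bit into the mask.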
@@ -92,10 +103,6 @@ PUBLIC int main(void)
  	if ((r=receive(ANY, &msg)) != OK)
		vm_panic("receive() error", r);

	if(msg.m_source == LOG_PROC_NR ||
		msg.m_source == TTY_PROC_NR)
		continue;

	SANITYCHECK(SCL_DETAIL);

	if(msg.m_type & NOTIFY_MESSAGE) {
@@ -131,12 +138,10 @@ PUBLIC int main(void)
	if((c=CALLNUMBER(msg.m_type)) < 0 || !vm_calls[c].vmc_func) {
		printf("VM: out of range or missing callnr %d from %d\n",
			msg.m_type, msg.m_source);
	} else if(vm_calls[c].vmc_caller != ANY &&
		vm_calls[c].vmc_caller != msg.m_source) {
		printf("VM: restricted callnr %d (%s) from %d instead of %d\n",
			c,
	} else if(!EPMOK(vm_calls[c].vmc_callers, msg.m_source)) {
		printf("VM: restricted call %s from %d instead of 0x%lx\n",
			vm_calls[c].vmc_name, msg.m_source,
			vm_calls[c].vmc_caller);
			vm_calls[c].vmc_callers);
	} else {
	SANITYCHECK(SCL_FUNCTIONS);
		result = vm_calls[c].vmc_func(&msg);
@@ -170,7 +175,6 @@ PRIVATE void vm_init(void)
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct vmproc *vmp;

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);
@@ -189,19 +193,24 @@ PRIVATE void vm_init(void)
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		if(ip->proc_nr >= 0) {
			vmp = &vmproc[ip->proc_nr];
		} else if(ip->proc_nr == SYSTEM) {
			vmp = &vmproc[VMP_SYSTEM];
		} else {
			vm_panic("init: crazy proc_nr", ip->proc_nr);
		}
		GETVMP(vmp, ip->proc_nr);

		/* reset fields as if exited */
		clear_proc(vmp);
@@ -233,13 +242,67 @@ PRIVATE void vm_init(void)
	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);

	/* Bits of code need to know where a process can
	 * start in a pagetable.
	 */
        kernel_top_bytes = find_kernel_top();

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

		old_stack =
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len -
			vmp->vm_arch.vm_seg[D].mem_len;

		if(!(ip->flags & PROC_FULLVM))
			continue;

        	if(pt_new(&vmp->vm_pt) != OK)
			vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			vm_panic("VM: vmctl for new stack failed", NO_NUM);
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		proc_new(vmp,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
				VM_STACKTOP);
	}

	/* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(vm_calls[i].vmc_func) { vm_panic("dup " #code , (code)); }  \
	if(i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
	vm_calls[i].vmc_caller = (thecaller);			      \
	if(((thecaller) < MINEPM || (thecaller) > MAXEPM) 		\
		&& (thecaller) != ANYEPM) {				\
		vm_panic(#thecaller " invalid", (code));  		\
	}								\
	vm_calls[i].vmc_callers |= EPM(thecaller);		      \
}

	/* Set call table to 0. This invalidates all calls (clear
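In the boot-process loop above, old_stack is the pre-allocated gap+stack area sitting between the end of the data segment and the old stack top; once the process has a real page table, that area is handed back with FREE_MEM() and proc_new() recreates it as BASICSTACK bytes of stack plus a gap. A rough worked example with made-up segment sizes (the CLICK_SHIFT of 12 is an assumption here, not taken from this diff):

#include <stdio.h>

#define CLICK_SHIFT	12	/* assumed click size of 4 KB */
#define CLICK2ABS(c)	((unsigned long)(c) << CLICK_SHIFT)
#define BASICSTACK	4096UL	/* one page, as in the hunk above */

int main(void)
{
	/* Hypothetical pre-paging segment layout of a boot process, in clicks:
	 * data is 8 clicks long, and the combined gap+stack area above it runs
	 * from S.mem_vir up to S.mem_vir + S.mem_len.
	 */
	unsigned long d_len = 8, s_vir = 40, s_len = 4;

	/* old_stack: everything above the data segment that was pre-allocated
	 * at boot time and can be returned with FREE_MEM(). */
	unsigned long old_stack_clicks = s_vir + s_len - d_len;	/* 36 clicks */

	/* proc_new() is then given BASICSTACK bytes of stack and the rest of
	 * that area as gap. */
	unsigned long gap_bytes = CLICK2ABS(old_stack_clicks) - BASICSTACK;

	printf("freed area: %lu clicks (%lu bytes)\n",
		old_stack_clicks, CLICK2ABS(old_stack_clicks));
	printf("new stack: %lu bytes, new gap: %lu bytes\n", BASICSTACK, gap_bytes);
	return 0;
}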
@@ -259,12 +322,17 @@ PRIVATE void vm_init(void)
	CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
	CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);

	/* Requests from tty device driver (/dev/video). */
	/* Physical mapping requests.
	 * tty (for /dev/video) does this.
	 * memory (for /dev/mem) does this.
	 */
	CALLMAP(VM_MAP_PHYS, do_map_phys, TTY_PROC_NR);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, TTY_PROC_NR);
	CALLMAP(VM_MAP_PHYS, do_map_phys, MEM_PROC_NR);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, MEM_PROC_NR);

	/* Requests from userland (source unrestricted). */
	CALLMAP(VM_MMAP, do_mmap, ANY);
	CALLMAP(VM_MMAP, do_mmap, ANYEPM);

	/* Requests (actually replies) from VFS (restricted to VFS only). */
	CALLMAP(VM_VFS_REPLY_OPEN, do_vfs_reply, VFS_PROC_NR);
@@ -272,3 +340,9 @@ PRIVATE void vm_init(void)
	CALLMAP(VM_VFS_REPLY_CLOSE, do_vfs_reply, VFS_PROC_NR);
}

void kputc(int c)
{
	if(c == '\n')
		ser_putc('\r');
	ser_putc(c);
}

@@ -69,8 +69,8 @@ PUBLIC int do_mmap(message *m)
		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if(!(vr = map_page_region(vmp, vmp->vm_stacktop,
			VM_DATATOP, len, 0,
		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp, vmp->vm_stacktop), VM_DATATOP, len, MAP_NONE,
			VR_ANON | VR_WRITABLE, mfflags))) {
			return ENOMEM;
		}
@@ -99,20 +99,17 @@ PUBLIC int do_map_phys(message *m)
	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK) {
		printf("do_map_phys: bogus target %d\n", target);
	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return ENXIO;

	if(!(vr = map_page_region(vmp, vmp->vm_stacktop, VM_DATATOP,
		(vir_bytes) m->VMMP_LEN, (vir_bytes)m->VMMP_PHADDR,
	if(!(vr = map_page_region(vmp, arch_vir2map(vmp, vmp->vm_stacktop),
		VM_DATATOP, (vir_bytes) m->VMMP_LEN, (vir_bytes)m->VMMP_PHADDR,
		VR_DIRECT | VR_NOPF | VR_WRITABLE, 0))) {
		printf("VM:do_map_phys: map_page_region failed\n");
		return ENOMEM;
	}

@@ -135,25 +132,21 @@ PUBLIC int do_unmap_phys(message *m)
	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK) {
		printf("VM:do_unmap_phys: bogus target %d\n", target);
	if((r=vm_isokendpt(target, &n)) != OK)
		return EINVAL;
	}

	vmp = &vmproc[n];

	if(!(region = map_lookup(vmp, (vir_bytes) m->VMUM_ADDR))) {
		printf("VM:do_unmap_phys: map_lookup failed\n");
	if(!(region = map_lookup(vmp,
	  arch_vir2map(vmp, (vir_bytes) m->VMUM_ADDR)))) {
		return EINVAL;
	}

	if(!(region->flags & VR_DIRECT)) {
		printf("VM:do_unmap_phys: region not a DIRECT mapping\n");
		return EINVAL;
	}

	if(map_unmap_region(vmp, region) != OK) {
		printf("VM:do_unmap_phys: map_unmap_region failed\n");
		return EINVAL;
	}


@@ -30,7 +30,10 @@
#include "util.h"
#include "region.h"

static char *pferr(int err)
/*===========================================================================*
 *				pf_errstr	     		     	*
 *===========================================================================*/
char *pf_errstr(u32_t err)
{
	static char buf[100];

@@ -67,8 +70,8 @@ PUBLIC void handle_pagefaults(void)
		/* See if address is valid at all. */
		if(!(region = map_lookup(vmp, addr))) {
			vm_assert(PFERR_NOPAGE(err));
			printf("VM: SIGSEGV %d bad addr 0x%lx error 0x%lx\n",
				ep, addr, err);
			printf("VM: SIGSEGV %d bad addr 0x%lx %s\n",
				ep, arch_map2vir(vmp, addr), pf_errstr(err));
			if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
				vm_panic("sys_kill failed", s);
			continue;
@@ -81,8 +84,8 @@ PUBLIC void handle_pagefaults(void)

		/* If process was writing, see if it's writable. */
		if(!(region->flags & VR_WRITABLE) && wr) {
			printf("VM: SIGSEGV %d ro map 0x%lx error 0x%lx\n",
				ep, addr, err);
			printf("VM: SIGSEGV %d ro map 0x%lx %s\n",
				ep, arch_map2vir(vmp, addr), pf_errstr(err));
			if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
				vm_panic("sys_kill failed", s);
			continue;

@@ -51,6 +51,10 @@ _PROTOTYPE( int do_fork, (message *msg)					);
_PROTOTYPE( struct vmproc *find_share, (struct vmproc *vmp_ign, Ino_t ino,
                        Dev_t dev, time_t ctime)                        );
_PROTOTYPE( int do_exec_newmem, (message *msg)				);
_PROTOTYPE( int proc_new, (struct vmproc *vmp, phys_bytes start,
	phys_bytes text, phys_bytes data, phys_bytes stack, phys_bytes gap,
	phys_bytes text_here, phys_bytes data_here, vir_bytes stacktop));
_PROTOTYPE( phys_bytes find_kernel_top, (void)				);

/* break.c */
_PROTOTYPE( int do_brk, (message *msg)					);
@@ -76,6 +80,7 @@ _PROTOTYPE(int do_unmap_phys, (message *msg)                            );
/* pagefaults.c */
_PROTOTYPE( void handle_pagefaults, (void)				);
_PROTOTYPE( void handle_memory, (void)				);
_PROTOTYPE( char *pf_errstr, (u32_t err));

/* $(ARCH)/pagetable.c */
_PROTOTYPE( void pt_init, (void)					);
@@ -112,7 +117,7 @@ _PROTOTYPE(struct vir_region * map_page_region,(struct vmproc *vmp, \
	vir_bytes min, vir_bytes max, vir_bytes length, vir_bytes what, \
	u32_t flags, int mapflags));
_PROTOTYPE(struct vir_region * map_proc_kernel,(struct vmproc *dst));
_PROTOTYPE(int map_region_extend,(struct vir_region *vr, vir_bytes delta));
_PROTOTYPE(int map_region_extend,(struct vmproc *vmp, struct vir_region *vr, vir_bytes delta));
_PROTOTYPE(int map_region_shrink,(struct vir_region *vr, vir_bytes delta));
_PROTOTYPE(int map_unmap_region,(struct vmproc *vmp, struct vir_region *vr));
_PROTOTYPE(int map_free_proc,(struct vmproc *vmp));

@@ -32,8 +32,6 @@ FORWARD _PROTOTYPE(int map_new_physblock, (struct vmproc *vmp,
FORWARD _PROTOTYPE(int map_copy_ph_block, (struct vmproc *vmp, struct vir_region *region, struct phys_region *ph));
FORWARD _PROTOTYPE(struct vir_region *map_copy_region, (struct vir_region *));

#if SANITYCHECKS

FORWARD _PROTOTYPE(void map_printmap, (struct vmproc *vmp));

PRIVATE char *map_name(struct vir_region *vr)
@@ -51,7 +49,6 @@ PRIVATE char *map_name(struct vir_region *vr)
	return "NOTREACHED";
}


/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
@@ -59,21 +56,30 @@ PRIVATE void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	printf("%d:\n", vmp->vm_endpoint);
	printf("memory regions in process %d:\n", vmp->vm_endpoint);
	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		struct phys_region *ph;
		printf("\t0x%08lx - 0x%08lx: %s  (0x%lx)\n",
			vr->vaddr, vr->vaddr + vr->length, map_name(vr), vr);
		int nph = 0;
		printf("\t0x%lx - 0x%lx (len 0x%lx), proc 0x%lx-0x%lx: %s\n",
			vr->vaddr, vr->vaddr + vr->length, vr->length,
			arch_map2vir(vmp, vr->vaddr),
			arch_map2vir(vmp, vr->vaddr + vr->length), map_name(vr));
		printf("\t\tphysical: ");
		for(ph = vr->first; ph; ph = ph->next) {
			printf("0x%lx-0x%lx(%d) ",
			printf("0x%lx-0x%lx (refs %d): phys 0x%lx ",
				vr->vaddr + ph->ph->offset,
				vr->vaddr + ph->ph->offset + ph->ph->length,
				ph->ph->refcount);
				ph->ph->refcount,
				ph->ph->phys);
		}
		printf("\n");
		printf(" (phregions %d)\n", nph);
	}
}


#if SANITYCHECKS


/*===========================================================================*
 *				map_sanitycheck			     *
 *===========================================================================*/
@@ -199,6 +205,7 @@ int mapflags;
                if(maxv <= minv) {
                        printf("map_page_region: minv 0x%lx and bytes 0x%lx\n",
                                minv, length);
			map_printmap(vmp);
                        return NULL;
                }
        }
@@ -236,6 +243,7 @@ int mapflags;
	if(!foundflag) {
		printf("VM: map_page_region: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		map_printmap(vmp);
		return NULL;
	}

@@ -261,8 +269,9 @@ int mapflags;
	newregion->flags = flags;
	newregion->tag = VRT_NONE;

	/* If this is a 'direct' mapping, try to actually map it. */
	if(flags & VR_DIRECT) {
	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		vm_assert(!(what % VM_PAGE_SIZE));
		vm_assert(!(length % VM_PAGE_SIZE));
		vm_assert(!(startv % VM_PAGE_SIZE));
		vm_assert(!newregion->first);
@@ -422,7 +431,7 @@ struct phys_region *physhint;

	/* Memory for new physical block. */
	clicks = CLICKSPERPAGE * length / VM_PAGE_SIZE;
	if(!what_mem) {
	if(what_mem == MAP_NONE) {
		if((mem_clicks = ALLOC_MEM(clicks, PAF_CLEAR)) == NO_MEM) {
			SLABFREE(newpb);
			SLABFREE(newphysr);
@@ -446,7 +455,7 @@ struct phys_region *physhint;
	vm_assert(!(length % VM_PAGE_SIZE));
	vm_assert(!(newpb->length % VM_PAGE_SIZE));
	if(map_ph_writept(vmp, region, newpb, NULL, NULL) != OK) {
		if(!what_mem)
		if(what_mem == MAP_NONE)
			FREE_MEM(mem_clicks, clicks);
		SLABFREE(newpb);
		SLABFREE(newphysr);
@@ -592,8 +601,8 @@ int write;
				vmp->vm_endpoint);
		}
#endif
		r = map_new_physblock(vmp, region, virpage, VM_PAGE_SIZE, 0,
			region->first);
		r = map_new_physblock(vmp, region, virpage, VM_PAGE_SIZE,
			MAP_NONE, region->first);
	}

	if(r != OK)
@@ -625,7 +634,7 @@ int write;
		int r;							\
		SANITYCHECK(SCL_DETAIL);				\
		if((r=map_new_physblock(vmp, region, start,		\
			end-start, 0, r1 ? r1 : r2)) != OK) {		\
			end-start, MAP_NONE, r1 ? r1 : r2)) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return r;					\
		}							\
@@ -826,7 +835,8 @@ PUBLIC struct vir_region *map_proc_kernel(struct vmproc *vmp)
/*========================================================================*
 *				map_region_extend	     	  	*
 *========================================================================*/
PUBLIC int map_region_extend(struct vir_region *vr, vir_bytes delta)
PUBLIC int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;

@@ -848,6 +858,8 @@ PUBLIC int map_region_extend(struct vir_region *vr, vir_bytes delta)
		return OK;
	}

	map_printmap(vmp);

	return ENOMEM;
}

@@ -860,7 +872,9 @@ PUBLIC int map_region_shrink(struct vir_region *vr, vir_bytes delta)
	vm_assert(vr->flags & VR_ANON);
	vm_assert(!(delta % VM_PAGE_SIZE));

#if 0
	printf("VM: ignoring region shrink\n");
#endif

	return OK;
}

@@ -31,3 +31,6 @@

/* Flags to pt_writemap(). */
#define WMF_OVERWRITE	0x01	/* Caller knows map may overwrite. */

/* Special value of 'what' to map_page_region meaning: unknown. */
#define MAP_NONE	0xFFFFFFFE

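MAP_NONE replaces the old convention of passing 0 for "no physical address given", which made physical page 0 itself impossible to request; the map_new_physblock() hunks above switch their test from !what_mem to what_mem == MAP_NONE accordingly. A minimal sketch of the distinction (the decide() helper is purely illustrative):

#include <stdio.h>

/* From the hunk above. */
#define MAP_NONE	0xFFFFFFFE

/* Sketch of the allocation decision in map_new_physblock(): with a 0 sentinel,
 * a request to map physical address 0 looks identical to "allocate fresh
 * memory"; with MAP_NONE it does not.
 */
static const char *decide(unsigned long what_mem)
{
	return (what_mem == MAP_NONE) ? "allocate with ALLOC_MEM"
				      : "use the given physical address";
}

int main(void)
{
	printf("what_mem = MAP_NONE -> %s\n", decide(MAP_NONE));
	printf("what_mem = 0x0      -> %s\n", decide(0x0));	/* now mappable */
	printf("what_mem = 0xb8000  -> %s\n", decide(0xb8000));	/* e.g. VGA text memory */
	return 0;
}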