
This patch adds (very limited) support for memory-mapping pages on file systems that are mounted on the special "none" device and that do not implement PEEK support by themselves. This includes hgfs, vbfs, and procfs.

The solution is implemented in libvtreefs, and consists of allocating pages, filling them with content by calling the file system's READ functionality, passing the pages to VM, and freeing them again. A new VM flag is used to indicate that these pages should be mapped in only once, and thus not cached beyond their single use. This prevents stale data from getting mapped in without the involvement of the file system, which would be problematic on file systems where file contents may become outdated at any time. No VM caching means no sharing and poor performance, but mmap no longer fails on these file systems.

Compared to a libc-based approach, this patch retains the on-demand nature of mmap. In particular, tail(1) is known to map in a large file area only to use a small portion of it.

All file systems now need to be given permission for the SETCACHEPAGE and CLEARCACHE calls to VM.

A very basic regression test is added to test74.

Change-Id: I17afc4cb97315b515cad1542521b98f293b6b559
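For illustration only, here is a minimal sketch (not code taken from the patch) of how a vtreefs-based file system could serve a single page-in request along the lines described above: allocate a page, fill it through its own READ path, hand it to VM for a one-time mapping, and free it again. The fs_read_page() helper is hypothetical, and the map-once flag is assumed here to be named VMSF_ONCE; vm_set_cacheblock() is the libsys wrapper shown in the listing below.

#include <errno.h>
#include <sys/types.h>
#include <sys/mman.h>

#include <minix/const.h>	/* OK */
#include <minix/syslib.h>	/* vm_set_cacheblock() */
#include <minix/vm.h>

#include <machine/param.h>
#include <machine/vmparam.h>	/* PAGE_SIZE */

/* Hypothetical helper: fill 'buf' with one page of file data via the
 * file system's own READ code path. */
int fs_read_page(ino_t ino, off_t pos, void *buf);

static int
serve_page(dev_t dev, ino_t ino, off_t pos)
{
	void *buf;
	int r;

	/* Allocate one page-aligned, page-sized buffer. */
	buf = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED)
		return ENOMEM;

	/* Fill the page through the file system's READ functionality. */
	if ((r = fs_read_page(ino, pos, buf)) == OK) {
		/* Hand the page to VM for a single mapping; with the
		 * assumed VMSF_ONCE flag, VM will not cache it. */
		r = vm_set_cacheblock(buf, dev, pos, ino, pos,
		    NULL /*flags*/, PAGE_SIZE, VMSF_ONCE);
	}

	/* Either way, the local copy can be freed again. */
	munmap(buf, PAGE_SIZE);

	return r;
}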
#include "syslib.h"

#include <string.h>
#include <assert.h>

#include <sys/mman.h>

#include <minix/vm.h>
#include <minix/sysutil.h>

#include <machine/param.h>
#include <machine/vmparam.h>

/*
 * Fill in and send a VM cache-control request (VM_MAPCACHEPAGE or
 * VM_SETCACHEPAGE). The block size and both offsets must be page-aligned.
 */
static int vm_cachecall(message *m, int call, void *addr, dev_t dev,
	off_t dev_offset, ino_t ino, off_t ino_offset, u32_t *flags,
	int blocksize, int setflags)
{
	if(blocksize % PAGE_SIZE)
		panic("blocksize %d should be a multiple of pagesize %d\n",
			blocksize, PAGE_SIZE);

	if(ino_offset % PAGE_SIZE)
		panic("inode offset %lld should be a multiple of pagesize %d\n",
			ino_offset, PAGE_SIZE);

	if(dev_offset % PAGE_SIZE)
		panic("dev offset %lld should be a multiple of pagesize %d\n",
			dev_offset, PAGE_SIZE);

	memset(m, 0, sizeof(*m));

	assert(dev != NO_DEV);

	m->m_vmmcp.dev_offset = dev_offset;
	m->m_vmmcp.ino_offset = ino_offset;
	m->m_vmmcp.ino = ino;
	m->m_vmmcp.block = addr;
	m->m_vmmcp.flags_ptr = flags;
	m->m_vmmcp.dev = dev;
	m->m_vmmcp.pages = blocksize / PAGE_SIZE;
	m->m_vmmcp.flags = setflags;

	return _taskcall(VM_PROC_NR, call, m);
}

/* Map a block from VM's cache into our address space, if VM still has it. */
void *vm_map_cacheblock(dev_t dev, off_t dev_offset,
	ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
	message m;

	if(vm_cachecall(&m, VM_MAPCACHEPAGE, NULL, dev, dev_offset,
		ino, ino_offset, flags, blocksize, 0) != OK)
		return MAP_FAILED;

	return m.m_vmmcp_reply.addr;
}

/* Hand a block to VM for caching; setflags adjusts how VM treats it. */
int vm_set_cacheblock(void *block, dev_t dev, off_t dev_offset,
	ino_t ino, off_t ino_offset, u32_t *flags, int blocksize, int setflags)
{
	message m;

	return vm_cachecall(&m, VM_SETCACHEPAGE, block, dev, dev_offset,
		ino, ino_offset, flags, blocksize, setflags);
}

/* Have VM evict all blocks it caches for the given device. */
int
vm_clear_cache(dev_t dev)
{
	message m;

	assert(dev != NO_DEV);

	memset(&m, 0, sizeof(m));

	m.m_vmmcp.dev = dev;

	return _taskcall(VM_PROC_NR, VM_CLEARCACHE, &m);
}
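As a usage note, again only a sketch rather than code from the patch: a file server would typically use the lookup and eviction wrappers above roughly as follows. The get_block(), read_block(), and fs_unmount_cleanup() names are hypothetical.

#include <sys/types.h>
#include <sys/mman.h>		/* MAP_FAILED */

#include <minix/syslib.h>	/* vm_map_cacheblock(), vm_clear_cache() */
#include <minix/vm.h>

/* Hypothetical slow path: read the block from backing storage. */
void *read_block(dev_t dev, off_t pos, int blocksize);

/* Prefer a block VM still holds in its cache; otherwise read it anew. */
void *
get_block(dev_t dev, ino_t ino, off_t pos, int blocksize)
{
	u32_t flags = 0;
	void *p;

	if ((p = vm_map_cacheblock(dev, pos, ino, pos, &flags,
	    blocksize)) != MAP_FAILED)
		return p;

	return read_block(dev, pos, blocksize);
}

/* At unmount time, have VM drop anything it still caches for this device. */
void
fs_unmount_cleanup(dev_t dev)
{
	(void)vm_clear_cache(dev);
}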