VFS: split block, character device handling

All functions prefixed with bdev_ are moved into bdev.c, and those
prefixed with cdev_ are now in cdev.c.  The code in both files is
converted to KNF.  The little (IOCTL-related) code left in device.c
is also cleaned up but should probably be moved into other existing
source files.  This is left to a future patch.  In general, VFS is
long overdue for a source code rebalancing, and the patch here is
only a step in the right direction.

Change-Id: I2fb25734b5778b44f2ff6d2ce331a8e2146e20b0
David van Moolenbroek 2016-01-11 18:33:53 +00:00
parent 232819dd49
commit 89a4204b83
5 changed files with 874 additions and 849 deletions

minix/servers/vfs/Makefile

@@ -8,7 +8,7 @@ SRCS= main.c open.c read.c write.c pipe.c dmap.c \
lock.c misc.c utility.c select.c table.c \
vnode.c vmnt.c request.c \
tll.c comm.c worker.c coredump.c \
socket.c
bdev.c cdev.c socket.c
.if ${MKCOVERAGE} != "no"
SRCS+= gcov.c

minix/servers/vfs/bdev.c (new file, 282 lines added)

@@ -0,0 +1,282 @@
/*
* This file contains routines to perform certain block device operations.
* These routines are called when a user application opens or closes a block
* device node, or performs an ioctl(2) call on such an opened node. Reading
* and writing on an opened block device is routed through the file system
* service that has mounted that block device, or the root file system service
* if the block device is not mounted. Block device operations issued by file
* system services themselves go directly to the block device, not through
* VFS.
*
* Block device drivers may not suspend operations for later processing, and
* thus, block device operations simply block their calling thread for the
* duration of the operation.
*
* The entry points in this file are:
* bdev_open: open a block device
* bdev_close: close a block device
* bdev_ioctl: issue an I/O control request on a block device
* bdev_reply: process the result of a block driver request
* bdev_up: a block driver has been mapped in
*/
#include "fs.h"
#include "vnode.h"
#include "file.h"
#include <string.h>
#include <assert.h>
/*
* Send a request to a block device, and suspend the current thread until a
* reply from the driver comes in.
*/
static int
bdev_sendrec(endpoint_t driver_e, message * mess_ptr)
{
int r, status, retry_count;
message mess_retry;
assert(IS_BDEV_RQ(mess_ptr->m_type));
mess_retry = *mess_ptr;
retry_count = 0;
do {
r = drv_sendrec(driver_e, mess_ptr);
if (r != OK)
return r;
status = mess_ptr->m_lblockdriver_lbdev_reply.status;
if (status == ERESTART) {
r = EDEADEPT;
*mess_ptr = mess_retry;
retry_count++;
}
} while (status == ERESTART && retry_count < 5);
/* If we failed to restart the request, return EIO. */
if (status == ERESTART && retry_count >= 5)
return EIO;
if (r != OK) {
if (r == EDEADSRCDST || r == EDEADEPT) {
printf("VFS: dead driver %d\n", driver_e);
dmap_unmap_by_endpt(driver_e);
return EIO;
} else if (r == ELOCKED) {
printf("VFS: deadlock talking to %d\n", driver_e);
return EIO;
}
panic("VFS: uncaught bdev_sendrec failure: %d", r);
}
return OK;
}
/*
* Open a block device.
*/
int
bdev_open(dev_t dev, int bits)
{
devmajor_t major_dev;
devminor_t minor_dev;
message dev_mess;
int r, access;
major_dev = major(dev);
minor_dev = minor(dev);
if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
if (dmap[major_dev].dmap_driver == NONE) return ENXIO;
access = 0;
if (bits & R_BIT) access |= BDEV_R_BIT;
if (bits & W_BIT) access |= BDEV_W_BIT;
/* Set up the message passed to the driver. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = BDEV_OPEN;
dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
dev_mess.m_lbdev_lblockdriver_msg.access = access;
dev_mess.m_lbdev_lblockdriver_msg.id = 0;
/* Call the driver. */
r = bdev_sendrec(dmap[major_dev].dmap_driver, &dev_mess);
if (r != OK)
return r;
return dev_mess.m_lblockdriver_lbdev_reply.status;
}
/*
* Close a block device.
*/
int
bdev_close(dev_t dev)
{
devmajor_t major_dev;
devminor_t minor_dev;
message dev_mess;
int r;
major_dev = major(dev);
minor_dev = minor(dev);
if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
if (dmap[major_dev].dmap_driver == NONE) return ENXIO;
/* Set up the message passed to the driver. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = BDEV_CLOSE;
dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
dev_mess.m_lbdev_lblockdriver_msg.id = 0;
/* Call the driver. */
r = bdev_sendrec(dmap[major_dev].dmap_driver, &dev_mess);
if (r != OK)
return r;
return dev_mess.m_lblockdriver_lbdev_reply.status;
}
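/*
* Illustrative sketch added by the editor, not part of this change: how
* bdev_open() and bdev_close() above are meant to be used by the rest of VFS.
* The mount path is assumed to be the main caller; the function name and
* error handling here are simplified.
*/
static int
example_bdev_usage(dev_t dev)
{
int r;
/* Open the device for reading and writing before mounting it. */
if ((r = bdev_open(dev, R_BIT | W_BIT)) != OK)
return r;
/*
* From here on, reads and writes are routed through the file system
* service that mounts the device; VFS itself only opens, closes, and
* issues ioctls on the device.
*/
/* Release the device again after unmounting. */
return bdev_close(dev);
}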
/*
* Perform an I/O control operation on a block device.
*/
int
bdev_ioctl(dev_t dev, endpoint_t proc_e, unsigned long req, vir_bytes buf)
{
struct dmap *dp;
cp_grant_id_t grant;
message dev_mess;
devmajor_t major_dev;
devminor_t minor_dev;
int r;
major_dev = major(dev);
minor_dev = minor(dev);
/* Determine driver dmap. */
dp = &dmap[major_dev];
if (dp->dmap_driver == NONE) {
printf("VFS: bdev_ioctl: no driver for major %d\n", major_dev);
return ENXIO;
}
/* Set up a grant if necessary. */
grant = make_ioctl_grant(dp->dmap_driver, proc_e, buf, req);
/* Set up the message passed to the driver. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = BDEV_IOCTL;
dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
dev_mess.m_lbdev_lblockdriver_msg.request = req;
dev_mess.m_lbdev_lblockdriver_msg.grant = grant;
dev_mess.m_lbdev_lblockdriver_msg.user = proc_e;
dev_mess.m_lbdev_lblockdriver_msg.id = 0;
/* Call the driver. */
r = bdev_sendrec(dp->dmap_driver, &dev_mess);
/* Clean up. */
if (GRANT_VALID(grant)) cpf_revoke(grant);
/* Return the result. */
if (r != OK)
return r;
return dev_mess.m_lblockdriver_lbdev_reply.status;
}
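/*
* Illustrative sketch added by the editor, not part of this change:
* bdev_ioctl() above relies on make_ioctl_grant() (still in device.c) to set
* up its grant. The access mode and buffer size are encoded in the ioctl
* request code itself; a hypothetical stand-alone decoder using the same
* <minix/ioctl.h> macros would look like this.
*/
#include <minix/ioctl.h> /* for the _MINIX_IOCTL_* decoding macros */
static void
example_decode_ioctl(unsigned long request, int * accessp, size_t * sizep)
{
int access = 0;
/* The direction bits of the request select the grant's access mode. */
if (_MINIX_IOCTL_IOR(request)) access |= CPF_WRITE; /* driver writes */
if (_MINIX_IOCTL_IOW(request)) access |= CPF_READ; /* driver reads */
/* The size of the user's buffer is encoded in the request as well. */
if (_MINIX_IOCTL_BIG(request))
*sizep = _MINIX_IOCTL_SIZE_BIG(request);
else
*sizep = _MINIX_IOCTL_SIZE(request);
*accessp = access;
}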
/*
* A block driver has results for a call. There must be a thread waiting for
* these results; wake it up. This function MUST NOT block its calling thread.
*/
void
bdev_reply(void)
{
struct worker_thread *wp;
struct dmap *dp;
if ((dp = get_dmap(who_e)) == NULL) {
printf("VFS: ignoring block dev reply from unknown driver "
"%d\n", who_e);
return;
}
if (dp->dmap_servicing == INVALID_THREAD) {
printf("VFS: ignoring spurious block dev reply from %d\n",
who_e);
return;
}
wp = worker_get(dp->dmap_servicing);
if (wp == NULL || wp->w_task != who_e || wp->w_drv_sendrec == NULL) {
printf("VFS: no worker thread waiting for a reply from %d\n",
who_e);
return;
}
*wp->w_drv_sendrec = m_in;
wp->w_drv_sendrec = NULL;
worker_signal(wp);
}
/*
* A new block device driver has been mapped in. This may affect both mounted
* file systems and open block-special files.
*/
void
bdev_up(devmajor_t maj)
{
int r, found, bits;
struct filp *rfilp;
struct vmnt *vmp;
struct vnode *vp;
char *label;
if (maj < 0 || maj >= NR_DEVICES) panic("VFS: out-of-bound major");
label = dmap[maj].dmap_label;
found = 0;
/*
* For each block-special file that was previously opened on the
* affected device, we need to reopen it on the new driver.
*/
for (rfilp = filp; rfilp < &filp[NR_FILPS]; rfilp++) {
if (rfilp->filp_count < 1) continue;
if ((vp = rfilp->filp_vno) == NULL) continue;
if (major(vp->v_sdev) != maj) continue;
if (!S_ISBLK(vp->v_mode)) continue;
/* Reopen the device on the driver, once per filp. */
bits = rfilp->filp_mode & (R_BIT | W_BIT);
if ((r = bdev_open(vp->v_sdev, bits)) != OK) {
printf("VFS: mounted dev %d/%d re-open failed: %d\n",
maj, minor(vp->v_sdev), r);
dmap[maj].dmap_recovering = 0;
return; /* Give up entirely */
}
found = 1;
}
/* Tell each affected mounted file system about the new endpoint. */
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
if (major(vmp->m_dev) != maj) continue;
/* Send the driver label to the mounted file system. */
if (req_newdriver(vmp->m_fs_e, vmp->m_dev, label) != OK)
printf("VFS: error sending new driver label to %d\n",
vmp->m_fs_e);
}
/*
* If any block-special file was open for this major at all, also
* inform the root file system about the new driver. We do this even
* if the block-special file is linked to another mounted file system,
* merely because it is more work to check for that case.
*/
if (found) {
if (req_newdriver(ROOT_FS_E, makedev(maj, 0), label) != OK)
printf("VFS: error sending new driver label to %d\n",
ROOT_FS_E);
}
}

minix/servers/vfs/cdev.c (new file, 506 lines added)

@@ -0,0 +1,506 @@
/*
* This file contains routines to perform character device operations.
* Character drivers may suspend I/O requests on their devices (read, write,
* ioctl), as well as select requests. These requests will therefore suspend
* their calling process, freeing up the associated VFS worker thread for other
* tasks. The I/O requests may later be cancelled as a result of the suspended
* process receiving a signal (which it either catches or dies from), in which
* case there will be a worker thread associated with the cancellation. Open
* and close requests may not suspend and will thus block the calling thread.
*
* The entry points in this file are:
* cdev_map: map a character device to its actual device number
* cdev_open: open a character device
* cdev_close: close a character device
* cdev_io: initiate a read, write, or ioctl to a character device
* cdev_select: initiate a select call on a device
* cdev_cancel: cancel an I/O request, blocking until it has been cancelled
* cdev_reply: process the result of a character driver request
*/
#include "fs.h"
#include "vnode.h"
#include "file.h"
#include <string.h>
#include <fcntl.h>
#include <sys/ttycom.h>
#include <assert.h>
/*
* Map the given device number to a real device number, remapping /dev/tty to
* the given process's controlling terminal if it has one. Perform a bounds
* check on the resulting device's major number, and return NO_DEV on failure.
* This function is idempotent but not used that way.
*/
dev_t
cdev_map(dev_t dev, struct fproc * rfp)
{
devmajor_t major;
/*
* First cover one special case: /dev/tty, the magic device that
* translates to the controlling TTY.
*/
if ((major = major(dev)) == CTTY_MAJOR) {
/* No controlling terminal? Fail the request. */
if (rfp->fp_tty == NO_DEV) return NO_DEV;
/* Substitute the controlling terminal device. */
dev = rfp->fp_tty;
major = major(dev);
}
if (major < 0 || major >= NR_DEVICES) return NO_DEV;
return dev;
}
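/*
* Illustrative sketch added by the editor, not part of this change: the
* effect of the /dev/tty remapping performed by cdev_map() above. The
* function name and scenario are hypothetical.
*/
static dev_t
example_cdev_map_usage(struct fproc * rfp)
{
dev_t dev;
/* An operation on /dev/tty (major CTTY_MAJOR) ... */
dev = makedev(CTTY_MAJOR, 0);
/*
* ... is redirected to the process's controlling terminal, or fails
* with NO_DEV if the process does not have one.
*/
return cdev_map(dev, rfp);
}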
/*
* Obtain the dmap structure for the given device, if a valid driver exists for
* the major device. Perform redirection for CTTY_MAJOR.
*/
static struct dmap *
cdev_get(dev_t dev, devminor_t * minor_dev)
{
struct dmap *dp;
int slot;
/*
* Remap /dev/tty as needed. Perform a bounds check on the major
* number.
*/
if ((dev = cdev_map(dev, fp)) == NO_DEV)
return NULL;
/* Determine the driver endpoint. */
dp = &dmap[major(dev)];
/* See if driver is roughly valid. */
if (dp->dmap_driver == NONE) return NULL;
if (isokendpt(dp->dmap_driver, &slot) != OK) {
printf("VFS: cdev_get: old driver for major %x (%d)\n",
major(dev), dp->dmap_driver);
return NULL;
}
/* Also return the (possibly redirected) minor number. */
*minor_dev = minor(dev);
return dp;
}
/*
* A new minor device number has been returned. Request PFS to create a
* temporary device file to hold it.
*/
static int
cdev_clone(int fd, dev_t dev, devminor_t new_minor)
{
struct vnode *vp;
struct node_details res;
int r;
assert(fd != -1);
/* Device number of the new device. */
dev = makedev(major(dev), new_minor);
/* Create a new file system node on PFS for the cloned device. */
r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid,
RWX_MODES | I_CHAR_SPECIAL, dev, &res);
if (r != OK) {
(void)cdev_close(dev);
return r;
}
/* Drop the old node and use the new values. */
if ((vp = get_free_vnode()) == NULL) {
req_putnode(PFS_PROC_NR, res.inode_nr, 1); /* is this right? */
(void)cdev_close(dev);
return err_code;
}
lock_vnode(vp, VNODE_OPCL);
assert(fp->fp_filp[fd] != NULL);
unlock_vnode(fp->fp_filp[fd]->filp_vno);
put_vnode(fp->fp_filp[fd]->filp_vno);
vp->v_fs_e = res.fs_e;
vp->v_vmnt = NULL;
vp->v_dev = NO_DEV;
vp->v_inode_nr = res.inode_nr;
vp->v_mode = res.fmode;
vp->v_sdev = dev;
vp->v_fs_count = 1;
vp->v_ref_count = 1;
fp->fp_filp[fd]->filp_vno = vp;
return OK;
}
/*
* Open or close a character device. The given operation must be either
* CDEV_OPEN or CDEV_CLOSE. For CDEV_OPEN, 'fd' must be the file descriptor
* for the file being opened; for CDEV_CLOSE, it is ignored. For CDEV_OPEN,
* 'flags' identifies a bitwise combination of R_BIT, W_BIT, and/or O_NOCTTY;
* for CDEV_CLOSE, it too is ignored.
*/
static int
cdev_opcl(int op, dev_t dev, int fd, int flags)
{
devminor_t minor_dev, new_minor;
struct dmap *dp;
struct fproc *rfp;
message dev_mess;
int r, r2, acc;
/*
* We need a file descriptor for CDEV_OPEN, because if the driver
* returns a cloned device, we need to replace what the fd points to.
* For CDEV_CLOSE however, we may be closing a device for which the
* calling process has no file descriptor, and thus we expect no
* meaningful fd value in that case.
*/
assert(op == CDEV_OPEN || op == CDEV_CLOSE);
assert(fd != -1 || op == CDEV_CLOSE);
/* Determine task dmap. */
if ((dp = cdev_get(dev, &minor_dev)) == NULL)
return ENXIO;
/*
* CTTY exception: do not actually send the open/close request for
* /dev/tty to the driver. This avoids the case that the actual device
* will remain open forever if the process calls setsid() after opening
* /dev/tty.
*/
if (major(dev) == CTTY_MAJOR) return OK;
/*
* Add O_NOCTTY to the access flags if this process is not a session
* leader, or if it already has a controlling tty, or if it is someone
* else's controlling tty. For performance reasons, only search the
* full process table if this driver has set controlling TTYs before.
*/
if (!(fp->fp_flags & FP_SESLDR) || fp->fp_tty != 0) {
flags |= O_NOCTTY;
} else if (!(flags & O_NOCTTY) && dp->dmap_seen_tty) {
for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++)
if (rfp->fp_pid != PID_FREE && rfp->fp_tty == dev)
flags |= O_NOCTTY;
}
/* Prepare the request message. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = op;
dev_mess.m_vfs_lchardriver_openclose.minor = minor_dev;
dev_mess.m_vfs_lchardriver_openclose.id = who_e;
if (op == CDEV_OPEN) {
acc = 0;
if (flags & R_BIT) acc |= CDEV_R_BIT;
if (flags & W_BIT) acc |= CDEV_W_BIT;
if (flags & O_NOCTTY) acc |= CDEV_NOCTTY;
dev_mess.m_vfs_lchardriver_openclose.user = who_e;
dev_mess.m_vfs_lchardriver_openclose.access = acc;
}
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_opcl failed: %d", r);
/* Block the thread waiting for a reply. */
self->w_task = dp->dmap_driver;
self->w_drv_sendrec = &dev_mess;
worker_wait();
self->w_task = NONE;
assert(self->w_drv_sendrec == NULL);
/* Process the reply. */
r = dev_mess.m_lchardriver_vfs_reply.status;
if (op == CDEV_OPEN && r >= 0) {
/*
* Some devices need special processing upon open. Such a
* device is "cloned", i.e., on a successful open it is replaced
* by a new device with a new unique minor device number. This
* new device number identifies a new object that has been
* allocated within a driver.
*/
if (r & CDEV_CLONED) {
new_minor = r & ~(CDEV_CLONED | CDEV_CTTY);
if ((r2 = cdev_clone(fd, dev, new_minor)) < 0)
return r2;
}
/* Did this call make the TTY the controlling TTY? */
if (r & CDEV_CTTY) {
fp->fp_tty = dev;
dp->dmap_seen_tty = TRUE;
}
r = OK;
}
/* Return the result from the driver. */
return r;
}
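/*
* Illustrative sketch added by the editor, not part of this change: how a
* character driver's open reply encodes cloning and controlling-TTY
* information, as interpreted by cdev_opcl() above. The status value is a
* hypothetical driver reply.
*/
static void
example_decode_open_reply(int status)
{
devminor_t new_minor;
if (status < 0)
return; /* a plain error such as ENXIO */
if (status & CDEV_CLONED) {
/* The driver allocated a new minor; the fd must be repointed. */
new_minor = status & ~(CDEV_CLONED | CDEV_CTTY);
printf("cloned open, new minor %d\n", new_minor);
}
if (status & CDEV_CTTY)
printf("the device became the controlling terminal\n");
}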
/*
* Open a character device.
*/
int
cdev_open(int fd, dev_t dev, int flags)
{
return cdev_opcl(CDEV_OPEN, dev, fd, flags);
}
/*
* Close a character device.
*/
int
cdev_close(dev_t dev)
{
return cdev_opcl(CDEV_CLOSE, dev, -1, 0);
}
/*
* Initiate a read, write, or ioctl to a character device. The given operation
* must be CDEV_READ, CDEV_WRITE, or CDEV_IOCTL. The call is made on behalf of
* user process 'proc_e'. For read/write requests, 'bytes' is the number of
* bytes to read into 'buf' at file position 'pos'. For ioctl requests,
* 'bytes' is actually an IOCTL request code, which implies the size of the
* buffer 'buf' if needed for the request at all ('pos' is ignored here). The
* 'flags' field contains file pointer flags, from which O_NONBLOCK is tested.
*/
int
cdev_io(int op, dev_t dev, endpoint_t proc_e, vir_bytes buf, off_t pos,
unsigned long bytes, int flags)
{
devminor_t minor_dev;
struct dmap *dp;
message dev_mess;
cp_grant_id_t gid;
int r;
assert(op == CDEV_READ || op == CDEV_WRITE || op == CDEV_IOCTL);
/* Determine task map. */
if ((dp = cdev_get(dev, &minor_dev)) == NULL)
return EIO;
/*
* Handle TIOCSCTTY ioctl: set controlling TTY. FIXME: this should not
* hardcode major device numbers, and not assume that the IOCTL request
* succeeds!
*/
if (op == CDEV_IOCTL && bytes == TIOCSCTTY &&
(major(dev) == TTY_MAJOR || major(dev) == PTY_MAJOR)) {
fp->fp_tty = dev;
}
/* Create a grant for the buffer provided by the user process. */
if (op != CDEV_IOCTL) {
gid = cpf_grant_magic(dp->dmap_driver, proc_e, buf,
(size_t)bytes, (op == CDEV_READ) ? CPF_WRITE : CPF_READ);
if (!GRANT_VALID(gid))
panic("VFS: cpf_grant_magic failed");
} else
gid = make_ioctl_grant(dp->dmap_driver, proc_e, buf, bytes);
/* Set up the message that will be sent to the driver. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = op;
dev_mess.m_vfs_lchardriver_readwrite.minor = minor_dev;
if (op == CDEV_IOCTL) {
dev_mess.m_vfs_lchardriver_readwrite.request = bytes;
dev_mess.m_vfs_lchardriver_readwrite.user = proc_e;
} else {
dev_mess.m_vfs_lchardriver_readwrite.pos = pos;
dev_mess.m_vfs_lchardriver_readwrite.count = bytes;
}
dev_mess.m_vfs_lchardriver_readwrite.id = proc_e;
dev_mess.m_vfs_lchardriver_readwrite.grant = gid;
dev_mess.m_vfs_lchardriver_readwrite.flags = 0;
if (flags & O_NONBLOCK)
dev_mess.m_vfs_lchardriver_readwrite.flags |= CDEV_NONBLOCK;
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_io failed: %d", r);
/* Suspend the calling process until a reply arrives. */
fp->fp_cdev.dev = dev;
fp->fp_cdev.endpt = dp->dmap_driver;
fp->fp_cdev.grant = gid; /* revoke this when unsuspended */
suspend(FP_BLOCKED_ON_CDEV);
return SUSPEND;
}
/*
* Initiate a select call on a device. Return OK iff the request was sent.
* This function explicitly bypasses cdev_get() since it must not do CTTY
* mapping, because a) the caller already has done that, b) "fp" may be wrong.
*/
int
cdev_select(dev_t dev, int ops)
{
devmajor_t major;
message dev_mess;
struct dmap *dp;
int r;
/* Determine task dmap, without CTTY mapping. */
assert(dev != NO_DEV);
major = major(dev);
assert(major >= 0 && major < NR_DEVICES);
assert(major != CTTY_MAJOR);
dp = &dmap[major];
/* Prepare the request message. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = CDEV_SELECT;
dev_mess.m_vfs_lchardriver_select.minor = minor(dev);
dev_mess.m_vfs_lchardriver_select.ops = ops;
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_select failed: %d", r);
return OK;
}
/*
* Cancel an I/O request, blocking until it has been cancelled.
*/
int
cdev_cancel(dev_t dev, endpoint_t endpt __unused, cp_grant_id_t grant)
{
devminor_t minor_dev;
message dev_mess;
struct dmap *dp;
int r;
/* Determine task dmap. */
if ((dp = cdev_get(dev, &minor_dev)) == NULL)
return EIO;
/* Prepare the request message. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = CDEV_CANCEL;
dev_mess.m_vfs_lchardriver_cancel.minor = minor_dev;
dev_mess.m_vfs_lchardriver_cancel.id = fp->fp_endpoint;
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_cancel failed: %d", r);
/* Suspend this thread until we have received the response. */
self->w_task = dp->dmap_driver;
self->w_drv_sendrec = &dev_mess;
worker_wait();
self->w_task = NONE;
assert(self->w_drv_sendrec == NULL);
/* Clean up. */
if (GRANT_VALID(grant))
(void)cpf_revoke(grant);
/* Return the result. Note that the request may have completed. */
r = dev_mess.m_lchardriver_vfs_reply.status;
return (r == EAGAIN) ? EINTR : r; /* see below regarding error codes */
}
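/*
* Illustrative sketch added by the editor, not part of this change: how the
* state saved by cdev_io() is assumed to be used when a process suspended on
* a character device catches a signal. The signal/unpause path would call
* something along these lines; the function name is hypothetical.
*/
static int
example_cancel_suspended(struct fproc * rfp)
{
/* Only processes blocked on a character device can be cancelled here. */
if (rfp->fp_blocked_on != FP_BLOCKED_ON_CDEV)
return EINVAL;
/*
* Cancel the outstanding request, using the device, driver endpoint, and
* grant that cdev_io() stored right before suspending the process.
*/
return cdev_cancel(rfp->fp_cdev.dev, rfp->fp_cdev.endpt,
rfp->fp_cdev.grant);
}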
/*
* A character driver has results for an open, close, read, write, or ioctl
* call (i.e., everything except select). There may be a thread waiting for
* these results as part of an ongoing open, close, or (for read/write/ioctl)
* cancel call. If so, wake up that thread; if not, send a reply to the
* requesting process. This function MUST NOT block its calling thread.
*/
static void
cdev_generic_reply(message * m_ptr)
{
struct fproc *rfp;
struct worker_thread *wp;
endpoint_t proc_e;
int r, slot;
proc_e = m_ptr->m_lchardriver_vfs_reply.id;
if (m_ptr->m_lchardriver_vfs_reply.status == SUSPEND) {
printf("VFS: ignoring SUSPEND status from %d\n",
m_ptr->m_source);
return;
}
if (isokendpt(proc_e, &slot) != OK) {
printf("VFS: proc %d from %d not found\n",
proc_e, m_ptr->m_source);
return;
}
rfp = &fproc[slot];
wp = rfp->fp_worker;
if (wp != NULL && wp->w_task == who_e && wp->w_drv_sendrec != NULL) {
assert(!fp_is_blocked(rfp));
*wp->w_drv_sendrec = *m_ptr;
wp->w_drv_sendrec = NULL;
worker_signal(wp); /* continue open/close/cancel */
} else if (rfp->fp_blocked_on != FP_BLOCKED_ON_CDEV ||
rfp->fp_cdev.endpt != m_ptr->m_source) {
/*
* This would typically be caused by a protocol error, i.e., a
* driver not properly following the character driver protocol.
*/
printf("VFS: proc %d not blocked on %d\n",
proc_e, m_ptr->m_source);
} else {
/*
* Some services (e.g., inet) use the same infrastructure for
* nonblocking and cancelled requests, resulting in one of
* EINTR or EAGAIN when the other is really the appropriate
* code. Thus, cdev_cancel converts EAGAIN into EINTR, and we
* convert EINTR into EAGAIN here.
*/
r = m_ptr->m_lchardriver_vfs_reply.status;
revive(proc_e, (r == EINTR) ? EAGAIN : r);
}
}
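/*
* Illustrative sketch added by the editor, not part of this change: the two
* error-code conversions described above, shown side by side. The status
* values are hypothetical driver replies.
*/
static int
example_status_seen_by_user(int driver_status, int was_cancelled)
{
if (was_cancelled)
/* cdev_cancel(): EAGAIN from the driver means "interrupted". */
return (driver_status == EAGAIN) ? EINTR : driver_status;
/* The revive path above: EINTR from the driver means "try again". */
return (driver_status == EINTR) ? EAGAIN : driver_status;
}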
/*
* A character driver has results for us.
*/
void
cdev_reply(void)
{
if (get_dmap(who_e) == NULL) {
printf("VFS: ignoring char dev reply from unknown driver %d\n",
who_e);
return;
}
switch (call_nr) {
case CDEV_REPLY:
cdev_generic_reply(&m_in);
break;
case CDEV_SEL1_REPLY:
select_reply1(m_in.m_source, m_in.m_lchardriver_vfs_sel1.minor,
m_in.m_lchardriver_vfs_sel1.status);
break;
case CDEV_SEL2_REPLY:
select_reply2(m_in.m_source, m_in.m_lchardriver_vfs_sel2.minor,
m_in.m_lchardriver_vfs_sel2.status);
break;
default:
printf("VFS: char driver %u sent unknown reply %x\n",
who_e, call_nr);
}
}

minix/servers/vfs/device.c

@@ -1,861 +1,91 @@
/* When a needed block is not in the cache, it must be fetched from the disk.
* Special character files also require I/O. The routines for these are here.
/*
* This file contains a number of device-type independent device routines.
*
* The entry points in this file are:
* cdev_open: open a character device
* cdev_close: close a character device
* cdev_io: initiate a read, write, or ioctl to a character device
* cdev_select: initiate a select call on a device
* cdev_cancel: cancel an I/O request, blocking until it has been cancelled
* cdev_reply: process the result of a character driver request
* bdev_open: open a block device
* bdev_close: close a block device
* bdev_reply: process the result of a block driver request
* bdev_up: a block driver has been mapped in
* do_ioctl: perform the IOCTL system call
* do_ioctl: perform the IOCTL system call
* make_ioctl_grant: make a grant for an IOCTL request to a device
*/
#include "fs.h"
#include <string.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/ttycom.h>
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/endpoint.h>
#include <minix/ioctl.h>
#include <minix/u64.h>
#include "file.h"
#include "dmap.h"
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"
#include "file.h"
#include <sys/ioctl.h>
static int cdev_opcl(int op, int fd, dev_t dev, int flags);
static int block_io(endpoint_t driver_e, message *mess_ptr);
static cp_grant_id_t make_grant(endpoint_t driver_e, endpoint_t user_e, int op,
vir_bytes buf, unsigned long size);
/*===========================================================================*
* bdev_open *
*===========================================================================*/
int bdev_open(dev_t dev, int access)
/*
* Perform the ioctl(2) system call.
*/
int
do_ioctl(void)
{
/* Open a block device. */
devmajor_t major_dev;
devminor_t minor_dev;
message dev_mess;
int r;
unsigned long request;
struct filp *f;
register struct vnode *vp;
vir_bytes arg;
int r, fd;
major_dev = major(dev);
minor_dev = minor(dev);
if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
if (dmap[major_dev].dmap_driver == NONE) return ENXIO;
fd = job_m_in.m_lc_vfs_ioctl.fd;
request = job_m_in.m_lc_vfs_ioctl.req;
arg = (vir_bytes)job_m_in.m_lc_vfs_ioctl.arg;
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = BDEV_OPEN;
dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
dev_mess.m_lbdev_lblockdriver_msg.access = 0;
if (access & R_BIT) dev_mess.m_lbdev_lblockdriver_msg.access |= BDEV_R_BIT;
if (access & W_BIT) dev_mess.m_lbdev_lblockdriver_msg.access |= BDEV_W_BIT;
dev_mess.m_lbdev_lblockdriver_msg.id = 0;
if ((f = get_filp(fd, VNODE_READ)) == NULL)
return(err_code);
vp = f->filp_vno; /* get vnode pointer */
switch (vp->v_mode & S_IFMT) {
case S_IFBLK:
f->filp_ioctl_fp = fp;
r = bdev_ioctl(vp->v_sdev, who_e, request, arg);
f->filp_ioctl_fp = NULL;
break;
case S_IFCHR:
r = cdev_io(CDEV_IOCTL, vp->v_sdev, who_e, arg, 0, request,
f->filp_flags);
break;
default:
r = ENOTTY;
}
unlock_filp(f);
/* Call the task. */
r = block_io(dmap[major_dev].dmap_driver, &dev_mess);
if (r != OK)
return r;
return dev_mess.m_lblockdriver_lbdev_reply.status;
}
/*===========================================================================*
* bdev_close *
*===========================================================================*/
int bdev_close(dev_t dev)
/*
* Create a magic grant for the given IOCTL request.
*/
cp_grant_id_t
make_ioctl_grant(endpoint_t driver_e, endpoint_t user_e, vir_bytes buf,
unsigned long request)
{
/* Close a block device. */
devmajor_t major_dev;
devminor_t minor_dev;
message dev_mess;
int r;
cp_grant_id_t grant;
int access;
size_t size;
major_dev = major(dev);
minor_dev = minor(dev);
if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
if (dmap[major_dev].dmap_driver == NONE) return ENXIO;
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = BDEV_CLOSE;
dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
dev_mess.m_lbdev_lblockdriver_msg.id = 0;
r = block_io(dmap[major_dev].dmap_driver, &dev_mess);
if (r != OK)
return r;
return dev_mess.m_lblockdriver_lbdev_reply.status;
}
/*===========================================================================*
* bdev_ioctl *
*===========================================================================*/
static int bdev_ioctl(dev_t dev, endpoint_t proc_e, unsigned long req,
vir_bytes buf)
{
/* Perform an I/O control operation on a block device. */
struct dmap *dp;
cp_grant_id_t gid;
message dev_mess;
devmajor_t major_dev;
devminor_t minor_dev;
int r;
major_dev = major(dev);
minor_dev = minor(dev);
/* Determine task dmap. */
dp = &dmap[major_dev];
if (dp->dmap_driver == NONE) {
printf("VFS: bdev_ioctl: no driver for major %d\n", major_dev);
return(ENXIO);
}
/* Set up a grant if necessary. */
gid = make_grant(dp->dmap_driver, proc_e, BDEV_IOCTL, buf, req);
/* Set up the message passed to the task. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = BDEV_IOCTL;
dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
dev_mess.m_lbdev_lblockdriver_msg.request = req;
dev_mess.m_lbdev_lblockdriver_msg.grant = gid;
dev_mess.m_lbdev_lblockdriver_msg.user = proc_e;
dev_mess.m_lbdev_lblockdriver_msg.id = 0;
/* Call the task. */
r = block_io(dp->dmap_driver, &dev_mess);
/* Clean up. */
if (GRANT_VALID(gid)) cpf_revoke(gid);
/* Return the result. */
if (r != OK)
return(r);
return(dev_mess.m_lblockdriver_lbdev_reply.status);
}
/*===========================================================================*
* make_grant *
*===========================================================================*/
static cp_grant_id_t make_grant(endpoint_t driver_e, endpoint_t user_e, int op,
vir_bytes buf, unsigned long bytes)
{
/* Create a magic grant for the given operation and buffer. */
cp_grant_id_t gid;
int access;
size_t size;
switch (op) {
case CDEV_READ:
case CDEV_WRITE:
gid = cpf_grant_magic(driver_e, user_e, buf,
(size_t) bytes, op == CDEV_READ ? CPF_WRITE : CPF_READ);
break;
case CDEV_IOCTL:
case BDEV_IOCTL:
/* For IOCTLs, the bytes parameter contains the IOCTL request.
/*
* For IOCTLs, the bytes parameter contains the IOCTL request.
* This request encodes the requested access method and buffer size.
*/
access = 0;
if(_MINIX_IOCTL_IOR(bytes)) access |= CPF_WRITE;
if(_MINIX_IOCTL_IOW(bytes)) access |= CPF_READ;
if(_MINIX_IOCTL_BIG(bytes))
size = _MINIX_IOCTL_SIZE_BIG(bytes);
if (_MINIX_IOCTL_IOR(request)) access |= CPF_WRITE;
if (_MINIX_IOCTL_IOW(request)) access |= CPF_READ;
if (_MINIX_IOCTL_BIG(request))
size = _MINIX_IOCTL_SIZE_BIG(request);
else
size = _MINIX_IOCTL_SIZE(bytes);
size = _MINIX_IOCTL_SIZE(request);
/* Grant access to the buffer even if no I/O happens with the ioctl,
/*
* Grant access to the buffer even if no I/O happens with the ioctl,
* although now that we no longer identify responses based on grants,
* this is not strictly necessary.
*/
gid = cpf_grant_magic(driver_e, user_e, buf, size, access);
break;
grant = cpf_grant_magic(driver_e, user_e, buf, size, access);
default:
panic("VFS: unknown operation %d", op);
}
if (!GRANT_VALID(grant))
panic("VFS: cpf_grant_magic failed");
if (!GRANT_VALID(gid))
panic("VFS: cpf_grant_magic failed");
return gid;
}
/*===========================================================================*
* cdev_map *
*===========================================================================*/
dev_t cdev_map(dev_t dev, struct fproc *rfp)
{
/* Map the given device number to a real device number, remapping /dev/tty to
* the given process's controlling terminal if it has one. Perform a bounds
* check on the resulting device's major number, and return NO_DEV on failure.
* This function is idempotent but not used that way.
*/
devmajor_t major;
/* First cover one special case: /dev/tty, the magic device that translates
* to the controlling tty.
*/
if ((major = major(dev)) == CTTY_MAJOR) {
/* No controlling terminal? Fail the request. */
if (rfp->fp_tty == NO_DEV) return NO_DEV;
/* Substitute the controlling terminal device. */
dev = rfp->fp_tty;
major = major(dev);
}
if (major < 0 || major >= NR_DEVICES) return NO_DEV;
return dev;
}
/*===========================================================================*
* cdev_get *
*===========================================================================*/
static struct dmap *cdev_get(dev_t dev, devminor_t *minor_dev)
{
/* Obtain the dmap structure for the given device, if a valid driver exists for
* the major device. Perform redirection for CTTY_MAJOR.
*/
struct dmap *dp;
int slot;
/* Remap /dev/tty as needed. Perform a bounds check on the major number. */
if ((dev = cdev_map(dev, fp)) == NO_DEV)
return(NULL);
/* Determine task dmap. */
dp = &dmap[major(dev)];
/* See if driver is roughly valid. */
if (dp->dmap_driver == NONE) return(NULL);
if (isokendpt(dp->dmap_driver, &slot) != OK) {
printf("VFS: cdev_get: old driver for major %x (%d)\n", major(dev),
dp->dmap_driver);
return(NULL);
}
/* Also return the (possibly redirected) minor number. */
*minor_dev = minor(dev);
return dp;
}
/*===========================================================================*
* cdev_io *
*===========================================================================*/
int cdev_io(
int op, /* CDEV_READ, CDEV_WRITE, or CDEV_IOCTL */
dev_t dev, /* major-minor device number */
endpoint_t proc_e, /* in whose address space is buf? */
vir_bytes buf, /* virtual address of the buffer */
off_t pos, /* byte position */
unsigned long bytes, /* how many bytes to transfer, or request */
int flags /* special flags, like O_NONBLOCK */
)
{
/* Initiate a read, write, or ioctl to a character device. */
devminor_t minor_dev;
struct dmap *dp;
message dev_mess;
cp_grant_id_t gid;
int r;
assert(op == CDEV_READ || op == CDEV_WRITE || op == CDEV_IOCTL);
/* Determine task map. */
if ((dp = cdev_get(dev, &minor_dev)) == NULL)
return(EIO);
/* Handle TIOCSCTTY ioctl: set controlling tty.
* TODO: cleaner implementation work in progress.
*/
if (op == CDEV_IOCTL && bytes == TIOCSCTTY &&
(major(dev) == TTY_MAJOR || major(dev) == PTY_MAJOR)) {
fp->fp_tty = dev;
}
/* Create a grant for the buffer provided by the user process. */
gid = make_grant(dp->dmap_driver, proc_e, op, buf, bytes);
/* Set up the rest of the message that will be sent to the driver. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = op;
dev_mess.m_vfs_lchardriver_readwrite.minor = minor_dev;
if (op == CDEV_IOCTL) {
dev_mess.m_vfs_lchardriver_readwrite.request = bytes;
dev_mess.m_vfs_lchardriver_readwrite.user = proc_e;
} else {
dev_mess.m_vfs_lchardriver_readwrite.pos = pos;
dev_mess.m_vfs_lchardriver_readwrite.count = bytes;
}
dev_mess.m_vfs_lchardriver_readwrite.id = proc_e;
dev_mess.m_vfs_lchardriver_readwrite.grant = gid;
dev_mess.m_vfs_lchardriver_readwrite.flags = 0;
if (flags & O_NONBLOCK)
dev_mess.m_vfs_lchardriver_readwrite.flags |= CDEV_NONBLOCK;
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_io failed: %d", r);
/* Suspend the calling process until a reply arrives. */
fp->fp_cdev.dev = dev;
fp->fp_cdev.endpt = dp->dmap_driver;
fp->fp_cdev.grant = gid; /* revoke this when unsuspended */
suspend(FP_BLOCKED_ON_CDEV);
return SUSPEND;
}
/*===========================================================================*
* cdev_clone *
*===========================================================================*/
static int cdev_clone(int fd, dev_t dev, devminor_t new_minor)
{
/* A new minor device number has been returned. Request PFS to create a
* temporary device file to hold it.
*/
struct vnode *vp;
struct node_details res;
int r;
assert(fd != -1);
/* Device number of the new device. */
dev = makedev(major(dev), new_minor);
/* Issue request */
r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid,
RWX_MODES | I_CHAR_SPECIAL, dev, &res);
if (r != OK) {
(void)cdev_opcl(CDEV_CLOSE, -1, dev, 0);
return r;
}
/* Drop old node and use the new values */
if ((vp = get_free_vnode()) == NULL) {
req_putnode(PFS_PROC_NR, res.inode_nr, 1); /* is this right? */
(void)cdev_opcl(CDEV_CLOSE, -1, dev, 0);
return(err_code);
}
lock_vnode(vp, VNODE_OPCL);
assert(fp->fp_filp[fd] != NULL);
unlock_vnode(fp->fp_filp[fd]->filp_vno);
put_vnode(fp->fp_filp[fd]->filp_vno);
vp->v_fs_e = res.fs_e;
vp->v_vmnt = NULL;
vp->v_dev = NO_DEV;
vp->v_fs_e = res.fs_e;
vp->v_inode_nr = res.inode_nr;
vp->v_mode = res.fmode;
vp->v_sdev = dev;
vp->v_fs_count = 1;
vp->v_ref_count = 1;
fp->fp_filp[fd]->filp_vno = vp;
return OK;
}
/*===========================================================================*
* cdev_opcl *
*===========================================================================*/
static int cdev_opcl(
int op, /* operation, CDEV_OPEN or CDEV_CLOSE */
int fd, /* file descriptor (open) or -1 (close) */
dev_t dev, /* device to open or close */
int flags /* mode bits and flags */
)
{
/* Open or close a character device. */
devminor_t minor_dev, new_minor;
struct dmap *dp;
struct fproc *rfp;
message dev_mess;
int r, r2;
/*
* We need the a descriptor for CDEV_OPEN, because if the driver returns a
* cloned device, we need to replace what the fd points to. For CDEV_CLOSE
* however, we may be closing a device for which the calling process has no
* file descriptor, and thus we expect no meaningful fd value in that case.
*/
assert(op == CDEV_OPEN || op == CDEV_CLOSE);
assert(fd != -1 || op == CDEV_CLOSE);
/* Determine task dmap. */
if ((dp = cdev_get(dev, &minor_dev)) == NULL)
return(ENXIO);
/* CTTY exception: do not actually send the open/close request for /dev/tty
* to the driver. This avoids the case that the actual device will remain
* open forever if the process calls setsid() after opening /dev/tty.
*/
if (major(dev) == CTTY_MAJOR) return(OK);
/* Add O_NOCTTY to the access flags if this process is not a session leader,
* or if it already has a controlling tty, or if it is someone else's
* controlling tty. For performance reasons, only search the full process
* table if this driver has set controlling ttys before.
*/
if (!(fp->fp_flags & FP_SESLDR) || fp->fp_tty != 0) {
flags |= O_NOCTTY;
} else if (!(flags & O_NOCTTY) && dp->dmap_seen_tty) {
for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++)
if (rfp->fp_pid != PID_FREE && rfp->fp_tty == dev)
flags |= O_NOCTTY;
}
/* Prepare the request message. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = op;
dev_mess.m_vfs_lchardriver_openclose.minor = minor_dev;
dev_mess.m_vfs_lchardriver_openclose.id = who_e;
if (op == CDEV_OPEN) {
dev_mess.m_vfs_lchardriver_openclose.user = who_e;
dev_mess.m_vfs_lchardriver_openclose.access = 0;
if (flags & R_BIT)
dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_R_BIT;
if (flags & W_BIT)
dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_W_BIT;
if (flags & O_NOCTTY)
dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_NOCTTY;
}
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_opcl failed: %d", r);
/* Block the thread waiting for a reply. */
self->w_task = dp->dmap_driver;
self->w_drv_sendrec = &dev_mess;
worker_wait();
self->w_task = NONE;
assert(self->w_drv_sendrec == NULL);
/* Process the reply. */
r = dev_mess.m_lchardriver_vfs_reply.status;
if (op == CDEV_OPEN && r >= 0) {
/* Some devices need special processing upon open. Such a device is
* "cloned", i.e. on a succesful open it is replaced by a new device
* with a new unique minor device number. This new device number
* identifies a new object (such as a new network connection) that has
* been allocated within a driver.
*/
if (r & CDEV_CLONED) {
new_minor = r & ~(CDEV_CLONED | CDEV_CTTY);
if ((r2 = cdev_clone(fd, dev, new_minor)) < 0)
return(r2);
}
/* Did this call make the tty the controlling tty? */
if (r & CDEV_CTTY) {
fp->fp_tty = dev;
dp->dmap_seen_tty = TRUE;
}
r = OK;
}
/* Return the result from the driver. */
return(r);
}
/*===========================================================================*
* cdev_open *
*===========================================================================*/
int cdev_open(int fd, dev_t dev, int flags)
{
/* Open a character device. */
return cdev_opcl(CDEV_OPEN, fd, dev, flags);
}
/*===========================================================================*
* cdev_close *
*===========================================================================*/
int cdev_close(dev_t dev)
{
/* Close a character device. */
return cdev_opcl(CDEV_CLOSE, -1, dev, 0);
}
/*===========================================================================*
* do_ioctl *
*===========================================================================*/
int do_ioctl(void)
{
/* Perform the ioctl(2) system call. */
unsigned long ioctlrequest;
int fd, r = OK;
struct filp *f;
register struct vnode *vp;
dev_t dev;
vir_bytes argx;
fd = job_m_in.m_lc_vfs_ioctl.fd;
ioctlrequest = job_m_in.m_lc_vfs_ioctl.req;
argx = (vir_bytes)job_m_in.m_lc_vfs_ioctl.arg;
if ((f = get_filp(fd, VNODE_READ)) == NULL)
return(err_code);
vp = f->filp_vno; /* get vnode pointer */
if (!S_ISCHR(vp->v_mode) && !S_ISBLK(vp->v_mode)) {
r = ENOTTY;
}
if (r == OK) {
dev = vp->v_sdev;
if (S_ISBLK(vp->v_mode)) {
f->filp_ioctl_fp = fp;
r = bdev_ioctl(dev, who_e, ioctlrequest, argx);
f->filp_ioctl_fp = NULL;
} else
r = cdev_io(CDEV_IOCTL, dev, who_e, argx, 0, ioctlrequest,
f->filp_flags);
}
unlock_filp(f);
return(r);
}
/*===========================================================================*
* cdev_select *
*===========================================================================*/
int cdev_select(dev_t dev, int ops)
{
/* Initiate a select call on a device. Return OK iff the request was sent.
* This function explicitly bypasses cdev_get() since it must not do CTTY
* mapping, because a) the caller already has done that, b) "fp" may be wrong.
*/
devmajor_t major;
message dev_mess;
struct dmap *dp;
int r;
/* Determine task dmap, without CTTY mapping. */
assert(dev != NO_DEV);
major = major(dev);
assert(major >= 0 && major < NR_DEVICES);
assert(major != CTTY_MAJOR);
dp = &dmap[major];
/* Prepare the request message. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = CDEV_SELECT;
dev_mess.m_vfs_lchardriver_select.minor = minor(dev);
dev_mess.m_vfs_lchardriver_select.ops = ops;
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_select failed: %d", r);
return(OK);
}
/*===========================================================================*
* cdev_cancel *
*===========================================================================*/
int cdev_cancel(dev_t dev, endpoint_t endpt __unused, cp_grant_id_t grant)
{
/* Cancel an I/O request, blocking until it has been cancelled. */
devminor_t minor_dev;
message dev_mess;
struct dmap *dp;
int r;
/* Determine task dmap. */
if ((dp = cdev_get(dev, &minor_dev)) == NULL)
return(EIO);
/* Prepare the request message. */
memset(&dev_mess, 0, sizeof(dev_mess));
dev_mess.m_type = CDEV_CANCEL;
dev_mess.m_vfs_lchardriver_cancel.minor = minor_dev;
dev_mess.m_vfs_lchardriver_cancel.id = fp->fp_endpoint;
/* Send the request to the driver. */
if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
panic("VFS: asynsend in cdev_cancel failed: %d", r);
/* Suspend this thread until we have received the response. */
self->w_task = dp->dmap_driver;
self->w_drv_sendrec = &dev_mess;
worker_wait();
self->w_task = NONE;
assert(self->w_drv_sendrec == NULL);
/* Clean up. */
if (GRANT_VALID(grant))
(void)cpf_revoke(grant);
/* Return the result (note: the request may have completed). */
r = dev_mess.m_lchardriver_vfs_reply.status;
return (r == EAGAIN) ? EINTR : r;
}
/*===========================================================================*
* block_io *
*===========================================================================*/
static int block_io(endpoint_t driver_e, message *mess_ptr)
{
/* Perform I/O on a block device. The current thread is suspended until a reply
* comes in from the driver.
*/
int r, status, retry_count;
message mess_retry;
assert(IS_BDEV_RQ(mess_ptr->m_type));
mess_retry = *mess_ptr;
retry_count = 0;
do {
r = drv_sendrec(driver_e, mess_ptr);
if (r != OK)
return r;
status = mess_ptr->m_lblockdriver_lbdev_reply.status;
if (status == ERESTART) {
r = EDEADEPT;
*mess_ptr = mess_retry;
retry_count++;
}
} while (status == ERESTART && retry_count < 5);
/* If we failed to restart the request, return EIO */
if (status == ERESTART && retry_count >= 5)
return EIO;
if (r != OK) {
if (r == EDEADSRCDST || r == EDEADEPT) {
printf("VFS: dead driver %d\n", driver_e);
dmap_unmap_by_endpt(driver_e);
return(EIO);
} else if (r == ELOCKED) {
printf("VFS: ELOCKED talking to %d\n", driver_e);
return(EIO);
}
panic("block_io: can't send/receive: %d", r);
}
return(OK);
}
/*===========================================================================*
* bdev_up *
*===========================================================================*/
void bdev_up(devmajor_t maj)
{
/* A new block device driver has been mapped in. This may affect both mounted
* file systems and open block-special files.
*/
int r, found, bits;
struct filp *rfilp;
struct vmnt *vmp;
struct vnode *vp;
char *label;
if (maj < 0 || maj >= NR_DEVICES) panic("VFS: out-of-bound major");
label = dmap[maj].dmap_label;
found = 0;
/* For each block-special file that was previously opened on the affected
* device, we need to reopen it on the new driver.
*/
for (rfilp = filp; rfilp < &filp[NR_FILPS]; rfilp++) {
if (rfilp->filp_count < 1 || !(vp = rfilp->filp_vno)) continue;
if (major(vp->v_sdev) != maj) continue;
if (!S_ISBLK(vp->v_mode)) continue;
/* Reopen the device on the driver, once per filp. */
bits = rfilp->filp_mode & (R_BIT|W_BIT);
if ((r = bdev_open(vp->v_sdev, bits)) != OK) {
printf("VFS: mounted dev %d/%d re-open failed: %d.\n",
maj, minor(vp->v_sdev), r);
dmap[maj].dmap_recovering = 0;
return; /* Give up entirely */
}
found = 1;
}
/* Tell each affected mounted file system about the new endpoint.
*/
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
if (major(vmp->m_dev) != maj) continue;
/* Send the driver label to the mounted file system. */
if (OK != req_newdriver(vmp->m_fs_e, vmp->m_dev, label))
printf("VFS dev_up: error sending new driver label to %d\n",
vmp->m_fs_e);
}
/* If any block-special file was open for this major at all, also inform the
* root file system about the new driver. We do this even if the
* block-special file is linked to another mounted file system, merely
* because it is more work to check for that case.
*/
if (found) {
if (OK != req_newdriver(ROOT_FS_E, makedev(maj, 0), label))
printf("VFSdev_up: error sending new driver label to %d\n",
ROOT_FS_E);
}
}
/*===========================================================================*
* cdev_generic_reply *
*===========================================================================*/
static void cdev_generic_reply(message *m_ptr)
{
/* A character driver has results for an open, close, read, write, or ioctl
* call (i.e., everything except select). There may be a thread waiting for
* these results as part of an ongoing open, close, or (for read/write/ioctl)
* cancel call. If so, wake up that thread; if not, send a reply to the
* requesting process. This function MUST NOT block its calling thread.
*/
struct fproc *rfp;
struct worker_thread *wp;
endpoint_t proc_e;
int r, slot;
proc_e = m_ptr->m_lchardriver_vfs_reply.id;
if (m_ptr->m_lchardriver_vfs_reply.status == SUSPEND) {
printf("VFS: got SUSPEND from %d, not reviving\n", m_ptr->m_source);
return;
}
if (isokendpt(proc_e, &slot) != OK) {
printf("VFS: proc %d from %d not found\n", proc_e, m_ptr->m_source);
return;
}
rfp = &fproc[slot];
wp = rfp->fp_worker;
if (wp != NULL && wp->w_task == who_e && wp->w_drv_sendrec != NULL) {
assert(!fp_is_blocked(rfp));
*wp->w_drv_sendrec = *m_ptr;
wp->w_drv_sendrec = NULL;
worker_signal(wp); /* Continue open/close/cancel */
} else if (rfp->fp_blocked_on != FP_BLOCKED_ON_CDEV ||
rfp->fp_cdev.endpt != m_ptr->m_source) {
/* This would typically be caused by a protocol error, i.e. a driver
* not properly following the character driver protocol rules.
*/
printf("VFS: proc %d not blocked on %d\n", proc_e, m_ptr->m_source);
} else {
/* Some services (inet) use the same infrastructure for nonblocking
* and cancelled requests, resulting in one of EINTR or EAGAIN when the
* other is really the appropriate code. Thus, cdev_cancel converts
* EAGAIN into EINTR, and we convert EINTR into EAGAIN here.
*/
r = m_ptr->m_lchardriver_vfs_reply.status;
revive(proc_e, (r == EINTR) ? EAGAIN : r);
}
}
/*===========================================================================*
* cdev_reply *
*===========================================================================*/
void cdev_reply(void)
{
/* A character driver has results for us. */
if (get_dmap(who_e) == NULL) {
printf("VFS: ignoring char dev reply from unknown driver %d\n", who_e);
return;
}
switch (call_nr) {
case CDEV_REPLY:
cdev_generic_reply(&m_in);
break;
case CDEV_SEL1_REPLY:
select_reply1(m_in.m_source, m_in.m_lchardriver_vfs_sel1.minor,
m_in.m_lchardriver_vfs_sel1.status);
break;
case CDEV_SEL2_REPLY:
select_reply2(m_in.m_source, m_in.m_lchardriver_vfs_sel2.minor,
m_in.m_lchardriver_vfs_sel2.status);
break;
default:
printf("VFS: char driver %u sent unknown reply %x\n", who_e, call_nr);
}
}
/*===========================================================================*
* bdev_reply *
*===========================================================================*/
void bdev_reply(void)
{
/* A block driver has results for a call. There must be a thread waiting for
* these results - wake it up. This function MUST NOT block its calling thread.
*/
struct worker_thread *wp;
struct dmap *dp;
if ((dp = get_dmap(who_e)) == NULL) {
printf("VFS: ignoring block dev reply from unknown driver %d\n",
who_e);
return;
}
if (dp->dmap_servicing == INVALID_THREAD) {
printf("VFS: ignoring spurious block dev reply from %d\n", who_e);
return;
}
wp = worker_get(dp->dmap_servicing);
if (wp == NULL || wp->w_task != who_e || wp->w_drv_sendrec == NULL) {
printf("VFS: no worker thread waiting for a reply from %d\n", who_e);
return;
}
*wp->w_drv_sendrec = m_in;
wp->w_drv_sendrec = NULL;
worker_signal(wp);
return grant;
}

minix/servers/vfs/proto.h

@@ -20,6 +20,23 @@ struct lookup;
struct worker_thread;
struct job;
/* bdev.c */
int bdev_open(dev_t dev, int access);
int bdev_close(dev_t dev);
int bdev_ioctl(dev_t dev, endpoint_t proc_e, unsigned long req, vir_bytes buf);
void bdev_reply(void);
void bdev_up(devmajor_t major);
/* cdev.c */
dev_t cdev_map(dev_t dev, struct fproc *rfp);
int cdev_open(int fd, dev_t dev, int flags);
int cdev_close(dev_t dev);
int cdev_io(int op, dev_t dev, endpoint_t proc_e, vir_bytes buf, off_t pos,
unsigned long bytes, int flags);
int cdev_select(dev_t dev, int ops);
int cdev_cancel(dev_t dev, endpoint_t endpt, cp_grant_id_t grant);
void cdev_reply(void);
/* comm.c */
int drv_sendrec(endpoint_t drv_e, message *reqm);
void fs_cancel(struct vmnt *vmp);
@@ -30,19 +47,9 @@ void send_work(void);
int vm_vfs_procctl_handlemem(endpoint_t ep, vir_bytes mem, vir_bytes len, int flags);
/* device.c */
int cdev_open(int fd, dev_t dev, int flags);
int cdev_close(dev_t dev);
int cdev_io(int op, dev_t dev, endpoint_t proc_e, vir_bytes buf, off_t pos,
unsigned long bytes, int flags);
dev_t cdev_map(dev_t dev, struct fproc *rfp);
int cdev_select(dev_t dev, int ops);
int cdev_cancel(dev_t dev, endpoint_t endpt, cp_grant_id_t grant);
void cdev_reply(void);
int bdev_open(dev_t dev, int access);
int bdev_close(dev_t dev);
void bdev_reply(void);
void bdev_up(devmajor_t major);
int do_ioctl(void);
cp_grant_id_t make_ioctl_grant(endpoint_t driver_e, endpoint_t user_e,
vir_bytes buf, unsigned long request);
/* dmap.c */
void lock_dmap(struct dmap *dp);
@@ -300,8 +307,8 @@ int copy_path(char *dest, size_t size);
int fetch_name(vir_bytes path, size_t len, char *dest);
int isokendpt_f(const char *f, int l, endpoint_t e, int *p, int ft);
int in_group(struct fproc *rfp, gid_t grp);
int sys_datacopy_wrapper(endpoint_t src, vir_bytes srcv, endpoint_t dst, vir_bytes dstv,
size_t len);
int sys_datacopy_wrapper(endpoint_t src, vir_bytes srcv, endpoint_t dst,
vir_bytes dstv, size_t len);
#define okendpt(e, p) isokendpt_f(__FILE__, __LINE__, (e), (p), 1)
#define isokendpt(e, p) isokendpt_f(__FILE__, __LINE__, (e), (p), 0)