commit 723e51327f
The main purpose of this patch is to fix handling of unpause calls from PM
while another call is ongoing. The solution to this problem sparked a full
revision of the threading model, consisting of a large number of related
changes:

- all active worker threads are now always associated with a process, and
  every process has at most one active thread working for it;
- the process lock is always held by a process's worker thread;
- a process can now have both normal work and postponed PM work associated
  to it;
- timer expiry and non-postponed PM work is done from the main thread;
- filp garbage collection is done from a thread associated with VFS;
- reboot calls from PM are now done from a thread associated with PM;
- the DS events handler is protected from starting multiple threads;
- support for a system worker thread has been removed;
- the deadlock recovery thread has been replaced by a parameter to the
  worker_start() function; the number of worker threads has consequently
  been increased by one;
- saving and restoring of global but per-thread variables is now
  centralized in worker_suspend() and worker_resume(); err_code is now
  saved and restored in all cases (sketched below);
- the concept of jobs has been removed, and job_m_in now points to a
  message stored in the worker thread structure instead;
- the PM lock has been removed;
- the separate exec lock has been replaced by a lock on the VM process,
  which was already being locked for exec calls anyway;
- the FP_DROP_WORK flag has been removed, since it is no longer more than
  an optimization and only applied to processes operating on a pipe when
  getting killed;
- PM_UNPAUSE is now processed as a postponed PM request, from a thread
  associated with the target process;
- assignment to "fp" now takes place only when obtaining new work in the
  main thread or a worker thread, when resuming execution of a thread, and
  in the special case of exiting processes during reboot;
- there are no longer special cases where the yield() call is used to force
  a thread to run.

Change-Id: I7a97b9b95c2450454a9b5318dfa0e6150d4e6858
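Of the changes above, the centralization of per-thread globals is the easiest
to picture in isolation. The VFS worker machinery itself is not part of this
page, so the following is only a minimal sketch of the idea named in the
message: worker_suspend() and worker_resume() as the single place where
globals such as fp and err_code are parked in the worker thread structure
(which, per the message, now also stores the request message that job_m_in
points to). The struct fields, the stand-in message type, and the function
bodies here are hypothetical, invented purely for illustration.

/* Hypothetical sketch only; not the actual MINIX VFS code. */
struct message { int m_type; };		/* stand-in for the MINIX message type */
struct fproc;				/* per-process state, opaque here */

struct worker_thread {
	struct fproc *w_fp;	/* process this worker is acting for */
	struct message w_m_in;	/* stored request; "job_m_in" points here */
	int w_err_code;		/* saved copy of the global err_code */
};

/* Globals that are logically per-thread: each worker must save and restore
 * them when it yields, and doing so in exactly one place means no call site
 * can forget (err_code is now saved and restored in all cases).
 */
static struct fproc *fp;
static int err_code;

static void worker_suspend(struct worker_thread *wp)
{
	wp->w_fp = fp;
	wp->w_err_code = err_code;
}

static void worker_resume(struct worker_thread *wp)
{
	fp = wp->w_fp;
	err_code = wp->w_err_code;
}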
187 lines | 5.7 KiB | C
/* This file handles advisory file locking as required by POSIX.
 *
 * The entry points into this file are
 *   lock_op:	perform locking operations for FCNTL system call
 *   lock_revive: revive processes when a lock is released
 */

#include "fs.h"
#include <minix/com.h>
#include <minix/u64.h>
#include <fcntl.h>
#include <unistd.h>
#include "file.h"
#include "scratchpad.h"
#include "lock.h"
#include "vnode.h"
#include "param.h"

/*===========================================================================*
 *				lock_op					     *
 *===========================================================================*/
int lock_op(f, req)
struct filp *f;
int req;			/* either F_SETLK or F_SETLKW */
{
/* Perform the advisory locking required by POSIX. */

  int r, ltype, i, conflict = 0, unlocking = 0;
  mode_t mo;
  off_t first, last;
  struct flock flock;
  struct file_lock *flp, *flp2, *empty;

  /* Fetch the flock structure from user space. */
  r = sys_datacopy(who_e, (vir_bytes) scratch(fp).io.io_buffer, VFS_PROC_NR,
		   (vir_bytes) &flock, sizeof(flock));
  if (r != OK) return(EINVAL);

  /* Make some error checks. */
  ltype = flock.l_type;
  mo = f->filp_mode;
  if (ltype != F_UNLCK && ltype != F_RDLCK && ltype != F_WRLCK) return(EINVAL);
  if (req == F_GETLK && ltype == F_UNLCK) return(EINVAL);
  if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode))
	return(EINVAL);
  if (req != F_GETLK && ltype == F_RDLCK && (mo & R_BIT) == 0) return(EBADF);
  if (req != F_GETLK && ltype == F_WRLCK && (mo & W_BIT) == 0) return(EBADF);

  /* Compute the first and last bytes in the lock region. */
  switch (flock.l_whence) {
    case SEEK_SET:	first = 0; break;
    case SEEK_CUR:	first = f->filp_pos; break;
    case SEEK_END:	first = f->filp_vno->v_size; break;
    default:	return(EINVAL);
  }

  /* Check for overflow. */
  if (((long) flock.l_start > 0) && ((first + flock.l_start) < first))
	return(EINVAL);
  if (((long) flock.l_start < 0) && ((first + flock.l_start) > first))
	return(EINVAL);
  first = first + flock.l_start;
  last = first + flock.l_len - 1;
  if (flock.l_len == 0) last = MAX_FILE_POS;
  if (last < first) return(EINVAL);
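  /* For example, l_whence = SEEK_SET, l_start = 100 and l_len = 50 yield the
   * region first = 100, last = 149; l_len = 0 extends the lock through
   * MAX_FILE_POS, i.e. to the current and any future end of the file.
   */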

  /* Check if this region conflicts with any existing lock. */
  empty = NULL;
  for (flp = &file_lock[0]; flp < &file_lock[NR_LOCKS]; flp++) {
	if (flp->lock_type == 0) {
		if (empty == NULL) empty = flp;
		continue;	/* 0 means unused slot */
	}
	if (flp->lock_vnode != f->filp_vno) continue;	/* different file */
	if (last < flp->lock_first) continue;	/* new one is in front */
	if (first > flp->lock_last) continue;	/* new one is afterwards */
	if (ltype == F_RDLCK && flp->lock_type == F_RDLCK) continue;
	if (ltype != F_UNLCK && flp->lock_pid == fp->fp_pid) continue;
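	/* Two read locks always coexist, and a lock request is never blocked
	 * by entries that the calling process holds itself; those overlaps
	 * were skipped above.
	 */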

	/* There might be a conflict.  Process it. */
	conflict = 1;
	if (req == F_GETLK) break;

	/* If we are trying to set a lock, it just failed. */
	if (ltype == F_RDLCK || ltype == F_WRLCK) {
		if (req == F_SETLK) {
			/* For F_SETLK, just report back failure. */
			return(EAGAIN);
		} else {
			/* For F_SETLKW, suspend the process. */
			suspend(FP_BLOCKED_ON_LOCK);
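			/* The caller stays blocked on the lock until
			 * lock_revive() below wakes it up after a release.
			 */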
			return(SUSPEND);
		}
	}

	/* We are clearing a lock and we found something that overlaps. */
	unlocking = 1;
	if (first <= flp->lock_first && last >= flp->lock_last) {
		flp->lock_type = 0;	/* mark slot as unused */
		nr_locks--;		/* number of locks is now 1 less */
		continue;
	}

	/* Part of a locked region has been unlocked. */
	if (first <= flp->lock_first) {
		flp->lock_first = last + 1;
		continue;
	}

	if (last >= flp->lock_last) {
		flp->lock_last = first - 1;
		continue;
	}

	/* Bad luck. A lock has been split in two by unlocking the middle. */
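	/* Example: unlocking [40,59] out of a held lock [0,99] leaves the
	 * fragments [0,39] and [60,99], so the upper half needs a second
	 * table slot.
	 */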
	if (nr_locks == NR_LOCKS) return(ENOLCK);
	for (i = 0; i < NR_LOCKS; i++)
		if (file_lock[i].lock_type == 0) break;
	flp2 = &file_lock[i];
	flp2->lock_type = flp->lock_type;
	flp2->lock_pid = flp->lock_pid;
	flp2->lock_vnode = flp->lock_vnode;
	flp2->lock_first = last + 1;
	flp2->lock_last = flp->lock_last;
	flp->lock_last = first - 1;
	nr_locks++;
  }
  if (unlocking) lock_revive();

  if (req == F_GETLK) {
	if (conflict) {
		/* GETLK and conflict. Report on the conflicting lock. */
		flock.l_type = flp->lock_type;
		flock.l_whence = SEEK_SET;
		flock.l_start = flp->lock_first;
		flock.l_len = flp->lock_last - flp->lock_first + 1;
		flock.l_pid = flp->lock_pid;

	} else {
		/* It is GETLK and there is no conflict. */
		flock.l_type = F_UNLCK;
	}

	/* Copy the flock structure back to the caller. */
	r = sys_datacopy(VFS_PROC_NR, (vir_bytes) &flock, who_e,
		(vir_bytes) scratch(fp).io.io_buffer, sizeof(flock));
	return(r);
  }

  if (ltype == F_UNLCK) return(OK);	/* unlocked a region with no locks */

  /* There is no conflict.  If space exists, store new lock in the table. */
  if (empty == NULL) return(ENOLCK);	/* table full */
  empty->lock_type = ltype;
  empty->lock_pid = fp->fp_pid;
  empty->lock_vnode = f->filp_vno;
  empty->lock_first = first;
  empty->lock_last = last;
  nr_locks++;
  return(OK);
}


/*===========================================================================*
 *				lock_revive				     *
 *===========================================================================*/
void lock_revive()
{
/* Go find all the processes that are waiting for any kind of lock and
 * revive them all.  The ones that are still blocked will block again when
 * they run.  The others will complete.  This strategy is a space-time
 * tradeoff.  Figuring out exactly which ones to unblock now would take
 * extra code, and the only thing it would win would be some performance in
 * extremely rare circumstances (namely, that somebody actually used
 * locking).
 */

  struct fproc *fptr;

  for (fptr = &fproc[0]; fptr < &fproc[NR_PROCS]; fptr++) {
	if (fptr->fp_pid == PID_FREE) continue;
	if (fptr->fp_blocked_on == FP_BLOCKED_ON_LOCK) {
		revive(fptr->fp_endpoint, 0);
	}
  }
}
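For context, the service code above sits behind the ordinary POSIX fcntl()
interface. The following user-space illustration is standard POSIX rather
than anything MINIX-specific (the file name "datafile" is made up); it
exercises both the F_SETLK path and the lock-splitting case of lock_op():

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct flock fl;
	int fd;

	if ((fd = open("datafile", O_RDWR | O_CREAT, 0644)) < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Write-lock bytes 100..149; lock_op() computes the same
	 * first/last pair (100, 149) from these fields.
	 */
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 100;
	fl.l_len = 50;
	if (fcntl(fd, F_SETLK, &fl) < 0) {
		perror("fcntl(F_SETLK)");	/* EAGAIN on conflict */
		return EXIT_FAILURE;
	}

	/* Unlock the middle of that region; this is the case where
	 * lock_op() must split one table entry into two.
	 */
	fl.l_type = F_UNLCK;
	fl.l_start = 120;
	fl.l_len = 10;
	if (fcntl(fd, F_SETLK, &fl) < 0) {
		perror("fcntl(F_UNLCK)");
		return EXIT_FAILURE;
	}

	close(fd);	/* closing drops any remaining locks */
	return EXIT_SUCCESS;
}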