The main purpose of this patch is to fix handling of unpause calls from PM while another call is ongoing. The solution to this problem sparked a full revision of the threading model, consisting of a large number of related changes: - all active worker threads are now always associated with a process, and every process has at most one active thread working for it; - the process lock is always held by a process's worker thread; - a process can now have both normal work and postponed PM work associated to it; - timer expiry and non-postponed PM work is done from the main thread; - filp garbage collection is done from a thread associated with VFS; - reboot calls from PM are now done from a thread associated with PM; - the DS events handler is protected from starting multiple threads; - support for a system worker thread has been removed; - the deadlock recovery thread has been replaced by a parameter to the worker_start() function; the number of worker threads has consequently been increased by one; - saving and restoring of global but per-thread variables is now centralized in worker_suspend() and worker_resume(); err_code is now saved and restored in all cases; - the concept of jobs has been removed, and job_m_in now points to a message stored in the worker thread structure instead; - the PM lock has been removed; - the separate exec lock has been replaced by a lock on the VM process, which was already being locked for exec calls anyway; - PM_UNPAUSE is now processed as a postponed PM request, from a thread associated with the target process; - the FP_DROP_WORK flag has been removed, since it was no more than an optimization, applying only to processes operating on a pipe when getting killed; - assignment to "fp" now takes place only when obtaining new work in the main thread or a worker thread, when resuming execution of a thread, and in the special case of exiting processes during reboot; - there are no longer special cases where the yield() call is 
used to force a thread to run. Change-Id: I7a97b9b95c2450454a9b5318dfa0e6150d4e6858
		
			
				
	
	
		
			42 lines
		
	
	
		
			966 B
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			42 lines
		
	
	
		
			966 B
		
	
	
	
		
			C
		
	
	
	
	
	
#ifndef __VFS_WORKERS_H__
#define __VFS_WORKERS_H__

/*
 * Worker thread definitions for the VFS threading model.  VFS runs a fixed
 * pool of worker threads on top of the libmthread userspace threading
 * library; the macros below alias the mthread types and calls so the rest
 * of VFS can use short, library-agnostic names.
 */

#include <minix/mthread.h>

/* Type aliases for the underlying mthread primitives. */
#define thread_t	mthread_thread_t
#define mutex_t		mthread_mutex_t
#define cond_t		mthread_cond_t
#define attr_t		mthread_attr_t

/* Threading-library entry points. */
#define threads_init	mthread_init
#define yield		mthread_yield
#define yield_all	mthread_yield_all

/* Mutex operations. */
#define mutex_init	mthread_mutex_init
#define mutex_destroy	mthread_mutex_destroy
#define mutex_lock	mthread_mutex_lock
#define mutex_trylock	mthread_mutex_trylock
#define mutex_unlock	mthread_mutex_unlock

/* Condition variable operations. */
#define cond_init	mthread_cond_init
#define cond_destroy	mthread_cond_destroy
#define cond_wait	mthread_cond_wait
#define cond_signal	mthread_cond_signal

/* Forward declaration; a worker is associated with at most one process. */
struct fproc;

/*
 * Per-worker-thread state.  Each active worker is tied to exactly one
 * process (w_fp), and each process has at most one worker acting on its
 * behalf at any time.
 */
struct worker_thread {
  thread_t w_tid;			/* thread identifier of this worker */
  mutex_t w_event_mutex;		/* mutex protecting w_event */
  cond_t w_event;			/* condition the worker sleeps on while
					 * waiting for something to do or for a
					 * reply to arrive
					 */
  struct fproc *w_fp;			/* process this worker is working for */
  message w_msg;			/* copy of the request message being
					 * processed (job_m_in points here)
					 */
  int w_err_code;			/* saved per-thread err_code value,
					 * restored when the worker resumes
					 */
  message *w_fs_sendrec;		/* presumably the in-flight message of a
					 * sendrec to a file server — TODO
					 * confirm against worker.c
					 */
  message *w_drv_sendrec;		/* presumably the in-flight message of a
					 * sendrec to a (block) driver — TODO
					 * confirm against worker.c
					 */
  endpoint_t w_task;			/* NOTE(review): looks like the endpoint
					 * of the task/driver this worker is
					 * blocked on — confirm with callers
					 */
  struct dmap *w_dmap;			/* device mapping entry in use, if any;
					 * semantics defined elsewhere
					 */
  struct worker_thread *w_next;		/* next worker in a singly linked list
					 * (list head is maintained elsewhere)
					 */
};

#endif /* __VFS_WORKERS_H__ */
 |