 6aa26565e6
- To isolate execution inside the kernel we use a big kernel lock implemented as a spinlock. The lock is acquired as soon as possible after entering kernel mode and released as late as possible. Only one CPU at a time can execute the core kernel code.
- Measurements on real hardware show that the overhead of this lock is close to 0% of kernel time for the current system.
- The overhead of this lock may be as high as 45% of kernel time in virtual machines, depending on the ratio between the physical CPUs available and the emulated CPUs. The performance degradation is significant.
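To make the commit message concrete, here is a minimal, hypothetical sketch of how the BKL_LOCK()/BKL_UNLOCK() macros defined in the header below could bracket kernel entry and exit. The handler names hypothetical_kernel_entry/hypothetical_kernel_exit and the include name spinlock.h are assumptions for illustration and are not part of this commit.

/* Hypothetical sketch: bracketing in-kernel execution with the big kernel lock.
 * The handler names below are invented; only the macros come from the header. */
#include "spinlock.h"

SPINLOCK_DEFINE(big_kernel_lock)	/* the lock referenced by BKL_LOCK()/BKL_UNLOCK() */

void hypothetical_kernel_entry(void)
{
	BKL_LOCK();	/* taken as soon as possible after entering kernel mode */
	/* ... core kernel work runs here, serialized to one CPU at a time ... */
}

void hypothetical_kernel_exit(void)
{
	BKL_UNLOCK();	/* released as late as possible, just before returning to user mode */
}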
42 lines | 955 B | C
#ifndef __SPINLOCK_H__
#define __SPINLOCK_H__

#include "kernel.h"

typedef struct spinlock {
	atomic_t val;
} spinlock_t;

#ifndef CONFIG_SMP

#define SPINLOCK_DEFINE(name)
#define PRIVATE_SPINLOCK_DEFINE(name)
#define SPINLOCK_DECLARE(name)
#define spinlock_init(sl)
#define spinlock_lock(sl)
#define spinlock_unlock(sl)

#else

/* SMP */
#define SPINLOCK_DEFINE(name)	spinlock_t name;
#define PRIVATE_SPINLOCK_DEFINE(name)	PRIVATE SPINLOCK_DEFINE(name)
#define SPINLOCK_DECLARE(name)	extern SPINLOCK_DEFINE(name)
#define spinlock_init(sl) do { (sl)->val = 0; } while (0)

#if CONFIG_MAX_CPUS == 1
#define spinlock_lock(sl)
#define spinlock_unlock(sl)
#else
#define spinlock_lock(sl)	arch_spinlock_lock((atomic_t*) sl)
#define spinlock_unlock(sl)	arch_spinlock_unlock((atomic_t*) sl)
#endif

#endif /* CONFIG_SMP */

#define BKL_LOCK()	spinlock_lock(&big_kernel_lock)
#define BKL_UNLOCK()	spinlock_unlock(&big_kernel_lock)

#endif /* __SPINLOCK_H__ */
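The header only declares what it expects from the architecture layer: arch_spinlock_lock() and arch_spinlock_unlock() operating on an atomic_t. The snippet below is not the arch code from this commit; it is a generic test-and-set sketch using GCC's __atomic builtins, assuming atomic_t is an int-sized cell, to illustrate what such a pair typically does.

/* Illustrative only: a generic test-and-set spinlock with the signatures the
 * header expects. The real implementation is architecture-specific. */
typedef volatile int atomic_t;	/* assumption: atomic_t is an int-sized cell */

void arch_spinlock_lock(atomic_t *lock)
{
	/* spin until we atomically change *lock from 0 (free) to 1 (held) */
	while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) != 0)
		;	/* busy-wait; only one CPU at a time gets past this point */
}

void arch_spinlock_unlock(atomic_t *lock)
{
	/* release the lock so another CPU's lock loop can succeed */
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}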