mirror of https://github.com/wichtounet/thor-os.git (synced 2025-08-03 17:26:08 -04:00)

Clean concurrency utilities

parent a515261ed4
commit 6eb0a44a74
@@ -5,8 +5,8 @@
 // http://www.opensource.org/licenses/MIT)
 //=======================================================================
 
-#ifndef SLEEP_QUEUE_H
-#define SLEEP_QUEUE_H
+#ifndef CONDITION_VARIABLE_H
+#define CONDITION_VARIABLE_H
 
 #include <circular_buffer.hpp>
 #include <lock_guard.hpp>
@@ -18,12 +18,7 @@
 /*!
  * \brief A simple sleep queue
  */
-struct sleep_queue {
-private:
-    mutable spinlock lock;
-    circular_buffer<scheduler::pid_t, 16> queue;
-
-public:
+struct condition_variable {
     /*!
      * \brief Test if the sleep queue is empty
      */
@@ -58,6 +53,10 @@ public:
      * \return true if the thread was woken up, false if the timeout is passed
      */
     bool sleep(size_t ms);
+
+private:
+    mutable spinlock lock; ///< The spin lock used for protecting the queue
+    circular_buffer<scheduler::pid_t, 16> queue; ///< The queue of waiting threads
 };
 
 #endif
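For reference, the renamed condition_variable keeps the sleep-queue behaviour declared above: a process parks itself with sleep() or sleep(ms) and another thread wakes it with wake_up() or wake_up_all(). A minimal usage sketch follows, assuming the header path introduced by this commit (conc/condition_variable.hpp); the data_ready flag and the surrounding functions are illustrative only, not part of the commit.

// Sketch only: shows the wait/notify pattern of the renamed class.
// 'data_ready', 'wait_for_data' and 'publish_data' are illustrative names.
#include "conc/condition_variable.hpp"

namespace {

condition_variable data_queue;     // processes waiting for data
volatile bool data_ready = false;  // hypothetical shared state

} //end of anonymous namespace

void wait_for_data(){
    // Re-check the predicate after every wake-up, as the kernel call sites do.
    while(!data_ready){
        data_queue.sleep();
    }
}

bool wait_for_data(size_t timeout_ms){
    while(!data_ready){
        // sleep(ms) returns false if the timeout expired before a wake-up.
        if(!data_queue.sleep(timeout_ms)){
            return false;
        }
    }

    return true;
}

void publish_data(){
    data_ready = true;     // set the shared state before waking the waiters
    data_queue.wake_up();  // or wake_up_all() to release every waiter
}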
@@ -12,32 +12,51 @@
 #include "arch.hpp"
 
+/*!
+ * \brief An interrupt lock. This lock disables preemption on acquire.
+ */
 struct int_lock {
-private:
-    size_t rflags;
-
-public:
+    /*!
+     * \brief Acquire the lock. This will disable preemption.
+     */
     void acquire(){
         arch::disable_hwint(rflags);
     }
 
+    /*!
+     * \brief Release the lock. This will enable preemption.
+     */
     void release(){
         arch::enable_hwint(rflags);
     }
+
+private:
+    size_t rflags; ///< The CPU flags
 };
 
+/*!
+ * \brief A direct interrupt lock (RAII).
+ *
+ * This is the equivalent of a std::lock_guard<int_lock> but does not need to
+ * store a lock.
+ */
 struct direct_int_lock {
-private:
-    int_lock lock;
-
-public:
+    /*!
+     * \brief Construct a new direct_int_lock and acquire the lock.
+     */
    direct_int_lock(){
        lock.acquire();
    }
 
+    /*!
+     * \brief Destruct a direct_int_lock and release the lock.
+     */
     ~direct_int_lock(){
         lock.release();
     }
+
+private:
+    int_lock lock; ///< The interrupt lock
 };
 
 #endif
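Since direct_int_lock is the RAII form, a section that must not be preempted can be written as a scoped guard. A small sketch under that assumption; the counter and the two functions are made up for illustration, and the header providing int_lock is assumed to be included.

// Sketch only: 'tick_counter', 'bump_tick' and 'read_tick' are illustrative;
// assumes the header declaring int_lock / direct_int_lock is included.

namespace {

volatile size_t tick_counter = 0; // hypothetical state also touched by an IRQ handler

} //end of anonymous namespace

void bump_tick(){
    // Interrupts are disabled for the whole scope and the saved rflags are
    // restored when the guard is destroyed.
    direct_int_lock guard;

    ++tick_counter;
}

size_t read_tick(){
    // The non-RAII form works too, when the critical section does not map
    // cleanly onto a scope.
    int_lock lock;
    lock.acquire();
    auto value = tick_counter;
    lock.release();

    return value;
}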
@@ -16,72 +16,66 @@
 #include "scheduler.hpp"
 #include "logging.hpp"
 
-template<bool Debug = false>
+/*!
+ * \brief A mutex implementation.
+ *
+ * Once the lock is acquired, the critical section is only accessible by the
+ * thread who acquired the mutex.
+ */
 struct mutex {
-private:
-    mutable spinlock lock;
-    volatile size_t value;
-    circular_buffer<scheduler::pid_t, 16> queue;
-    const char* name;
-
-public:
+    /*!
+     * \brief Initialize the mutex (either to 1 or 0)
+     * \param v The initial value of the mutex
+     */
     void init(size_t v = 1){
-        value = v;
-
-        if(Debug){
-            name = "";
+        if(v > 1){
+            value = 1;
+        } else {
+            value = v;
         }
     }
 
-    void set_name(const char* name){
-        this->name = name;
-    }
-
+    /*!
+     * \brief Acquire the lock
+     */
     void acquire(){
         lock.acquire();
 
         if(value > 0){
             value = 0;
 
-            if(Debug){
-                logging::logf(logging::log_level::TRACE, "%s(mutex): directly acquired (process %d)\n", name, scheduler::get_pid());
-            }
-
             lock.release();
         } else {
             auto pid = scheduler::get_pid();
             queue.push(pid);
 
-            if(Debug){
-                logging::logf(logging::log_level::TRACE, "%s(mutex): wait %d\n", name, pid);
-            }
-
             scheduler::block_process_light(pid);
             lock.release();
             scheduler::reschedule();
         }
     }
 
+    /*!
+     * \brief Release the lock
+     */
     void release(){
         std::lock_guard<spinlock> l(lock);
 
         if(queue.empty()){
             value = 1;
-
-            if(Debug){
-                logging::logf(logging::log_level::TRACE, "%s(mutex): direct release (process %d)\n", name, scheduler::get_pid());
-            }
         } else {
             auto pid = queue.pop();
             scheduler::unblock_process(pid);
 
-            if(Debug){
-                logging::logf(logging::log_level::TRACE, "%s(mutex): wake %d\n", name, pid);
-            }
-
             //No need to increment value, the process won't
             //decrement it
         }
     }
 
+private:
+    mutable spinlock lock; ///< The spin lock protecting the value
+    volatile size_t value; ///< The value of the mutex
+    circular_buffer<scheduler::pid_t, 16> queue; ///< The sleep queue
 };
 
 #endif
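With the Debug template parameter gone, mutex is now a plain type, which is why call sites later in this commit change from mutex<> to mutex and from std::lock_guard<mutex<>> to std::lock_guard<mutex>. A usage sketch along those lines; the counter and its lock are hypothetical, and the include path is assumed to follow the conc/ layout used elsewhere in this commit.

// Sketch only: 'stats_lock' and 'stats_counter' are hypothetical.
#include "conc/mutex.hpp"

#include <lock_guard.hpp>

namespace {

mutex stats_lock;          // was 'mutex<>' before this commit
size_t stats_counter = 0;  // hypothetical shared state

} //end of anonymous namespace

void stats_init(){
    stats_lock.init(); // defaults to 1, i.e. initially unlocked
}

void stats_increment(){
    // Blocks (and deschedules) the calling process if the mutex is held.
    std::lock_guard<mutex> l(stats_lock);
    ++stats_counter;
}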
@@ -14,17 +14,26 @@
 #include "spinlock.hpp"
 #include "scheduler.hpp"
 
+/*!
+ * \brief A semaphore implementation.
+ *
+ * The critical section can be open to several processes.
+ */
 struct semaphore {
-private:
-    mutable spinlock lock;
-    volatile size_t value;
-    circular_buffer<scheduler::pid_t, 16> queue;
-
-public:
+    /*!
+     * \brief Initialize the semaphore
+     * \param v The initial value of the semaphore
+     */
     void init(size_t v){
         value = v;
     }
 
+    /*!
+     * \brief Acquire the lock.
+     *
+     * This will effectively decrease the current counter by 1 once the critical
+     * section is entered.
+     */
     void acquire(){
         lock.acquire();
 
@@ -41,6 +50,12 @@ public:
         }
     }
 
+    /*!
+     * \brief Release the lock.
+     *
+     * This will effectively increase the current counter by 1 once the critical
+     * section is left.
+     */
     void release(){
         std::lock_guard<spinlock> l(lock);
 
@@ -56,23 +71,34 @@ public:
         }
     }
 
-    void release(size_t v){
+    /*!
+     * \brief Release the lock several times.
+     *
+     * This will effectively increase the current counter by n once the critical
+     * section is left.
+     */
+    void release(size_t n){
         std::lock_guard<spinlock> l(lock);
 
         if(queue.empty()){
-            value += v;
+            value += n;
         } else {
-            while(v && !queue.empty()){
+            while(n && !queue.empty()){
                 auto pid = queue.pop();
                 scheduler::unblock_process(pid);
-                --v;
+                --n;
             }
 
-            if(v){
-                value += v;
+            if(n){
+                value += n;
             }
         }
     }
+
+private:
+    mutable spinlock lock; ///< The spin lock protecting the counter
+    volatile size_t value; ///< The value of the counter
+    circular_buffer<scheduler::pid_t, 16> queue; ///< The sleep queue
 };
 
 #endif
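The counting release(size_t n) overload lets a producer wake several blocked consumers at once and credit the counter with whatever is left over. A sketch of that pattern; the names and the include path are assumptions, not part of this commit.

// Sketch only: a counting-semaphore pattern with hypothetical names.
#include "conc/semaphore.hpp"

namespace {

semaphore items_available; // counts items ready for consumers

} //end of anonymous namespace

void queue_init(){
    items_available.init(0); // start empty: consumers block until a release
}

void producer_publish(size_t produced){
    // release(size_t n) wakes up to 'produced' blocked consumers in one call.
    items_available.release(produced);
}

void consumer_take(){
    items_available.acquire(); // decrements the counter, sleeping while it is 0
    // ... consume one item ...
}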
@@ -8,21 +8,33 @@
 #ifndef SPINLOCK_H
 #define SPINLOCK_H
 
+/*!
+ * \brief Implementation of a spinlock
+ *
+ * A spinlock simply waits in a loop until the lock is available.
+ */
 struct spinlock {
-private:
-    volatile size_t lock = 0;
-
-public:
+    /*!
+     * \brief Acquire the lock.
+     *
+     * This will wait indefinitely.
+     */
     void acquire(){
         while(!__sync_bool_compare_and_swap(&lock, 0, 1));
         __sync_synchronize();
         //TODO The last synchronize is probably not necessary
     }
 
+    /*!
+     * \brief Release the lock
+     */
     void release(){
         __sync_synchronize();
         lock = 0;
     }
+
+private:
+    volatile size_t lock = 0; ///< The value of the lock
 };
 
 #endif
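The TODO above is plausible: the GCC __sync builtins are documented as full barriers, so the extra __sync_synchronize() after a successful compare-and-swap is likely redundant, while the barrier in release() is what keeps stores from leaking past the unlock. Elsewhere in this commit the spinlock is used through std::lock_guard<spinlock>; a sketch of that pattern with hypothetical names follows.

// Sketch only: 'counters_lock' and 'irq_count' are hypothetical; assumes the
// spinlock header is included.
#include <lock_guard.hpp>

namespace {

spinlock counters_lock;
volatile size_t irq_count = 0; // hypothetical shared counter

} //end of anonymous namespace

void count_irq(){
    // acquire() busy-waits, so keep the protected section short.
    std::lock_guard<spinlock> l(counters_lock);
    ++irq_count;
}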
@@ -37,7 +37,7 @@ struct interface_descriptor {
     network::ip::address ip_address; ///< The interface IP address
     network::ip::address gateway; ///< The interface IP gateway
 
-    mutable mutex<> tx_lock; //To synchronize the queue
+    mutable mutex tx_lock; //To synchronize the queue
     mutable semaphore tx_sem;
     mutable semaphore rx_sem;
 
@@ -50,7 +50,7 @@ struct interface_descriptor {
     void (*hw_send)(interface_descriptor&, ethernet::packet& p);
 
     void send(ethernet::packet& p){
-        std::lock_guard<mutex<>> l(tx_lock);
+        std::lock_guard<mutex> l(tx_lock);
         tx_queue.push(p);
         tx_sem.release();
     }
@@ -15,7 +15,7 @@
 
 #include "tlib/net_constants.hpp"
 
-#include "conc/sleep_queue.hpp"
+#include "conc/condition_variable.hpp"
 
 #include "net/ethernet_packet.hpp"
 
@@ -41,7 +41,7 @@ struct socket {
     std::vector<network::ethernet::packet> packets;
 
     circular_buffer<network::ethernet::packet, 32> listen_packets;
-    sleep_queue listen_queue;
+    condition_variable listen_queue;
 
     socket(){}
     socket(size_t id, socket_domain domain, socket_type type, socket_protocol protocol, size_t next_fd, bool listen)
@@ -13,7 +13,7 @@
 
 #include <tlib/keycode.hpp>
 
-#include "conc/sleep_queue.hpp"
+#include "conc/condition_variable.hpp"
 
 namespace stdio {
 
@@ -35,7 +35,7 @@ struct virtual_terminal {
     circular_buffer<char, 2 * INPUT_BUFFER_SIZE> canonical_buffer;
     circular_buffer<size_t, 3 * INPUT_BUFFER_SIZE> raw_buffer;
 
-    sleep_queue input_queue;
+    condition_variable input_queue;
 
     void print(char c);
 
@@ -5,25 +5,25 @@
 // http://www.opensource.org/licenses/MIT)
 //=======================================================================
 
-#include "conc/sleep_queue.hpp"
+#include "conc/condition_variable.hpp"
 
 #include "scheduler.hpp"
 #include "logging.hpp"
 #include "assert.hpp"
 
-bool sleep_queue::empty() const {
+bool condition_variable::empty() const {
     std::lock_guard<spinlock> l(lock);
 
     return queue.empty();
 }
 
-scheduler::pid_t sleep_queue::top_process() const {
+scheduler::pid_t condition_variable::top_process() const {
     std::lock_guard<spinlock> l(lock);
 
     return queue.top();
 }
 
-scheduler::pid_t sleep_queue::wake_up() {
+scheduler::pid_t condition_variable::wake_up() {
     std::lock_guard<spinlock> l(lock);
 
     while (!queue.empty()) {
@@ -34,7 +34,7 @@ scheduler::pid_t sleep_queue::wake_up() {
         queue.pop();
 
         if (pid != scheduler::INVALID_PID) {
-            logging::logf(logging::log_level::TRACE, "sleep_queue: wake %d\n", pid);
+            logging::logf(logging::log_level::TRACE, "condition_variable: wake %d\n", pid);
 
             // Indicate to the scheduler that this process will be able to run
             // We use a hint here because it is possible that the thread was
@@ -48,7 +48,7 @@ scheduler::pid_t sleep_queue::wake_up() {
     return scheduler::INVALID_PID;
 }
 
-void sleep_queue::wake_up_all() {
+void condition_variable::wake_up_all() {
     std::lock_guard<spinlock> l(lock);
 
     while (!queue.empty()) {
@@ -59,7 +59,7 @@ void sleep_queue::wake_up_all() {
         queue.pop();
 
         if (pid != scheduler::INVALID_PID) {
-            logging::logf(logging::log_level::TRACE, "sleep_queue: wake(all) %d\n", pid);
+            logging::logf(logging::log_level::TRACE, "condition_variable: wake(all) %d\n", pid);
 
             // Indicate to the scheduler that this process will be able to run
             // We use a hint here because it is possible that the thread was
@@ -69,18 +69,18 @@ void sleep_queue::wake_up_all() {
     }
 }
 
-void sleep_queue::sleep() {
+void condition_variable::sleep() {
     lock.acquire();
 
     //Get the current process information
     auto pid = scheduler::get_pid();
 
-    logging::logf(logging::log_level::TRACE, "sleep_queue: wait %d\n", pid);
+    logging::logf(logging::log_level::TRACE, "condition_variable: wait %d\n", pid);
 
     //Enqueue the process in the sleep queue
     queue.push(pid);
 
-    thor_assert(!queue.full(), "The sleep_queue queue is full!");
+    thor_assert(!queue.full(), "The condition_variable queue is full!");
 
     //This process will sleep
     scheduler::block_process_light(pid);
@@ -90,7 +90,7 @@ void sleep_queue::sleep() {
     scheduler::reschedule();
 }
 
-bool sleep_queue::sleep(size_t ms) {
+bool condition_variable::sleep(size_t ms) {
     if (!ms) {
         return false;
     }
@@ -100,12 +100,12 @@ bool sleep_queue::sleep(size_t ms) {
     //Get the current process information
     auto pid = scheduler::get_pid();
 
-    logging::logf(logging::log_level::TRACE, "sleep_queue: %u wait with timeout %u\n", pid, ms);
+    logging::logf(logging::log_level::TRACE, "condition_variable: %u wait with timeout %u\n", pid, ms);
 
     //Enqueue the process in the sleep queue
     queue.push(pid);
 
-    thor_assert(!queue.full(), "The sleep_queue queue is full!");
+    thor_assert(!queue.full(), "The condition_variable queue is full!");
 
     //This process will sleep
     scheduler::block_process_timeout_light(pid, ms);
@@ -28,10 +28,10 @@ static constexpr const size_t BLOCK_SIZE = 512;
 
 ata::drive_descriptor* drives;
 
-mutex<> ata_lock;
+mutex ata_lock;
 
-mutex<> primary_lock;
-mutex<> secondary_lock;
+mutex primary_lock;
+mutex secondary_lock;
 
 block_cache cache;
 
@@ -350,10 +350,6 @@ void ata::detect_disks(){
     primary_lock.init(0);
     secondary_lock.init(0);
 
-    ata_lock.set_name("ata_lock");
-    primary_lock.set_name("ata_primary_lock");
-    secondary_lock.set_name("ata_secondary_lock");
-
     // Init the cache with 256 blocks
     cache.init(BLOCK_SIZE, 256);
 
@@ -14,14 +14,14 @@
 #include "net/arp_cache.hpp"
 #include "net/ip_layer.hpp"
 
-#include "conc/sleep_queue.hpp"
+#include "conc/condition_variable.hpp"
 
 #include "logging.hpp"
 #include "kernel_utils.hpp"
 
 namespace {
 
-sleep_queue wait_queue;
+condition_variable wait_queue;
 
 } //end of anonymous namespace
 
@@ -9,7 +9,7 @@
 #include <atomic.hpp>
 #include <list.hpp>
 
-#include "conc/sleep_queue.hpp"
+#include "conc/condition_variable.hpp"
 
 #include "net/tcp_layer.hpp"
 #include "net/dns_layer.hpp"
@@ -41,7 +41,7 @@ struct tcp_connection {
     size_t target_port;
 
     std::atomic<bool> listening;
-    sleep_queue queue;
+    condition_variable queue;
     circular_buffer<network::ethernet::packet, 8> packets;
 
     tcp_connection(size_t source_port, size_t target_port)