More random fixes.

Note to self: Zig is cursed https://github.com/ziglang/zig/issues/16311
IntegratedQuantum 2023-07-03 22:12:52 +02:00
parent 649eaf30a1
commit f5220cfe9f
3 changed files with 31 additions and 3 deletions

View File

@@ -162,7 +162,9 @@ pub fn deinit() void {
handleError(c.Pa_StopStream(stream));
handleError(c.Pa_CloseStream(stream));
handleError(c.Pa_Terminate());
main.threadPool.closeAllTasksOfType(&MusicLoadTask.vtable);
musicCache.clear();
activeTasks.deinit(main.globalAllocator);
activeMusicId.len = 0;
}

View File

@@ -102,7 +102,7 @@ fn setButtonPosFromValue(self: *DiscreteSlider) !void {
fn updateLabel(self: *DiscreteSlider, newValue: []const u8, width: f32) !void {
main.globalAllocator.free(self.currentText);
self.currentText = try main.globalAllocator.alloc(u8, newValue.len + self.text.len);
@memcpy(self.currentText[self.text.len..], self.text);
@memcpy(self.currentText[0..self.text.len], self.text);
@memcpy(self.currentText[self.text.len..], newValue);
const label = try Label.init(undefined, width - 3*border, self.currentText, .center);
self.label.deinit();

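Note on the slider change: the fix swaps the destination slice of the first @memcpy, so the label prefix lands in the first self.text.len bytes of the freshly allocated buffer and newValue fills the remainder, with each destination slice matching its source length. A minimal, self-contained sketch of that concatenation pattern (hypothetical concat helper, not code from this repository):

const std = @import("std");

// Concatenates a prefix and a value into one freshly allocated buffer,
// mirroring the slice bounds used by the fixed updateLabel code above.
fn concat(allocator: std.mem.Allocator, prefix: []const u8, value: []const u8) ![]u8 {
	const out = try allocator.alloc(u8, prefix.len + value.len);
	@memcpy(out[0..prefix.len], prefix); // the prefix lands at the start of the buffer
	@memcpy(out[prefix.len..], value); // the value fills the remaining bytes
	return out;
}

test "concat places the prefix first" {
	const result = try concat(std.testing.allocator, "Volume: ", "100%");
	defer std.testing.allocator.free(result);
	try std.testing.expectEqualStrings("Volume: 100%", result);
}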
View File

@@ -525,17 +525,19 @@ pub const ThreadPool = struct {
const refreshTime: u32 = 100; // The time after which all priorities get refreshed in milliseconds.
threads: []std.Thread,
currentTasks: []std.atomic.Atomic(?*const VTable),
loadList: *BlockingMaxHeap(Task),
allocator: Allocator,
pub fn init(allocator: Allocator, threadCount: usize) !ThreadPool {
var self = ThreadPool {
.threads = try allocator.alloc(std.Thread, threadCount),
.currentTasks = try allocator.alloc(std.atomic.Atomic(?*const VTable), threadCount),
.loadList = try BlockingMaxHeap(Task).init(allocator),
.allocator = allocator,
};
for(self.threads, 0..) |*thread, i| {
thread.* = try std.Thread.spawn(.{}, run, .{self});
thread.* = try std.Thread.spawn(.{}, run, .{self, i});
var buf: [64]u8 = undefined;
try thread.setName(try std.fmt.bufPrint(&buf, "Worker Thread {}", .{i+1}));
}
@@ -554,10 +556,32 @@ pub const ThreadPool = struct {
for(self.threads) |thread| {
thread.join();
}
self.allocator.free(self.currentTasks);
self.allocator.free(self.threads);
}
fn run(self: ThreadPool) !void {
pub fn closeAllTasksOfType(self: ThreadPool, vtable: *const VTable) void {
self.loadList.mutex.lock();
defer self.loadList.mutex.unlock();
var i: u32 = 0;
while(i < self.loadList.size) {
const task = &self.loadList.array[i];
if(task.vtable == vtable) {
task.vtable.clean(task.self);
self.loadList.removeIndex(i);
} else {
i += 1;
}
}
// Wait for active tasks:
for(self.currentTasks) |*task| {
while(task.load(.Monotonic) == vtable) {
std.time.sleep(1e6);
}
}
}
fn run(self: ThreadPool, id: usize) !void {
// In case any of the tasks wants to allocate memory:
var gpa = std.heap.GeneralPurposeAllocator(.{.thread_safe=false}){};
main.threadAllocator = gpa.allocator();
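Note on the new closeAllTasksOfType: it combines two steps. Queued tasks whose vtable matches are cleaned and removed from the load list while holding its mutex, and then the caller polls the per-worker currentTasks slots until no worker is still running a task of that type. A minimal sketch of the publish-and-poll half, reduced to a single worker slot with hypothetical names (not code from the repository):

const std = @import("std");

const VTable = struct { name: []const u8 };

// Each worker publishes the vtable of the task it is currently running;
// a canceller polls the slot until no task of that type is running anymore.
var currentTask = std.atomic.Atomic(?*const VTable).init(null);

fn runOneTask(vtable: *const VTable) void {
	currentTask.store(vtable, .Monotonic); // publish before running the task
	// ... execute the task here ...
	currentTask.store(null, .Monotonic); // clear once the task has returned
}

fn waitUntilFinished(vtable: *const VTable) void {
	while (currentTask.load(.Monotonic) == vtable) {
		std.time.sleep(1_000_000); // back off for 1ms, like the wait loop above
	}
}

test "publish and clear" {
	const vt = VTable{ .name = "example" };
	runOneTask(&vt);
	waitUntilFinished(&vt); // returns immediately because the slot was cleared
}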
@@ -569,7 +593,9 @@ pub const ThreadPool = struct {
while(true) {
{
var task = self.loadList.extractMax() catch break;
self.currentTasks[id].store(task.vtable, .Monotonic);
try task.vtable.run(task.self);
self.currentTasks[id].store(null, .Monotonic);
}
if(std.time.milliTimestamp() -% lastUpdate > refreshTime) {
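Note on the worker loop: run() now takes the worker index so each thread can publish into its own currentTasks slot around every task it executes, and it keeps the existing per-thread GeneralPurposeAllocator with .thread_safe = false, which skips internal locking because only that worker allocates through it. A minimal sketch of that per-worker allocator setup, with hypothetical names (not the repository's code):

const std = @import("std");

// Each worker owns a non-thread-safe general purpose allocator; it is only
// ever used from the thread that created it, so no internal locking is needed.
threadlocal var threadAllocator: std.mem.Allocator = undefined;

fn workerMain(id: usize) void {
	var gpa = std.heap.GeneralPurposeAllocator(.{.thread_safe = false}){};
	defer _ = gpa.deinit(); // reports any leaks from this worker on shutdown
	threadAllocator = gpa.allocator();
	std.debug.print("worker {} ready\n", .{id});
	// ... pull tasks from the shared queue and run them with threadAllocator ...
}

pub fn main() !void {
	const thread = try std.Thread.spawn(.{}, workerMain, .{@as(usize, 0)});
	thread.join();
}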