diff --git a/docs/docs/CHANGELOG.md b/docs/docs/CHANGELOG.md
index 257f1cc..c4779e4 100644
--- a/docs/docs/CHANGELOG.md
+++ b/docs/docs/CHANGELOG.md
@@ -26,6 +26,17 @@ Anubis now supports the [`missingHeader`](./admin/configuration/expressions.mdx#
 
 ### Fixes
 
+#### Fix event loop thrashing when solving a proof of work challenge
+
+Previously the "fast" proof of work solver had a fragment of JavaScript that attempted to only post an update about proof of work progress to the main browser window every 1024 iterations. This fragment of JavaScript was subtly incorrect in a way that passed review but actually made the workers send an update back to the main thread every iteration. This caused a pileup of unhandled async calls (similar to a socket accept() backlog pileup in Unix), leading to stack space exhaustion.
+
+This has been fixed in the following ways:
+
+1. The complicated boolean logic has been totally removed in favour of a worker-local iteration counter.
+2. The progress bar is updated by worker `0` instead of all workers.
+
+Hopefully this should limit the event loop thrashing and let ia32 browsers (as well as any environment with a smaller stack size than amd64 and aarch64 seem to have) function normally when processing Anubis proof of work challenges.
+
 #### Fix potential memory leak when discovering a solution
 
 In some cases, the parallel solution finder in Anubis could cause all of the worker promises to leak due to the fact the promises were being improperly terminated. This was fixed by having Anubis debounce worker termination instead of allowing it to potentially recurse infinitely.
diff --git a/web/js/proof-of-work.mjs b/web/js/proof-of-work.mjs
index 25bdd86..d70b9ee 100644
--- a/web/js/proof-of-work.mjs
+++ b/web/js/proof-of-work.mjs
@@ -3,7 +3,7 @@ export default function process(
   difficulty = 5,
   signal = null,
   progressCallback = null,
-  threads = navigator.hardwareConcurrency || 1,
+  threads = Math.max(navigator.hardwareConcurrency / 2, 1),
 ) {
   console.debug("fast algo");
   return new Promise((resolve, reject) => {
@@ -89,6 +89,7 @@ function processTask() {
       let threads = event.data.threads;
 
       const threadId = nonce;
+      let localIterationCount = 0;
 
       while (true) {
         const currentHash = await sha256(data + nonce);
@@ -114,21 +115,15 @@ function processTask() {
           break;
         }
 
-        const oldNonce = nonce;
         nonce += threads;
 
-        // send a progress update every 1024 iterations. since each thread checks
-        // separate values, one simple way to do this is by bit masking the
-        // nonce for multiples of 1024. unfortunately, if the number of threads
-        // is not prime, only some of the threads will be sending the status
-        // update and they will get behind the others. this is slightly more
-        // complicated but ensures an even distribution between threads.
-        if (
-          (nonce > oldNonce) | 1023 && // we've wrapped past 1024
-          (nonce >> 10) % threads === threadId // and it's our turn
-        ) {
+        // send a progress update every 1024 iterations so that the user can be informed of
+        // the state of the challenge.
+        if (threadId == 0 && localIterationCount === 1024) {
           postMessage(nonce);
+          localIterationCount = 0;
         }
+        localIterationCount++;
       }
 
       postMessage({
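For the curious, the parse is the culprit: JavaScript's bitwise `|` binds tighter than `&&`, so the old condition actually parsed as `((nonce > oldNonce) | 1023) && ((nonce >> 10) % threads === threadId)`, and `anything | 1023` is the constant truthy value `1023`. The intended "wrapped past 1024" guard therefore never gated anything, and a worker posted on every iteration that landed in its turn window. Below is a minimal standalone sketch (not the Anubis worker itself; the thread count and iteration total are made-up illustration values) contrasting the old and new throttle logic for one simulated worker:

```javascript
// Hypothetical illustration values, not from the Anubis source.
const threads = 4;
const threadId = 0; // pretend we are worker 0

// Old logic: `|` binds tighter than `&&`, so the first clause is the
// constant-truthy ((nonce > oldNonce) | 1023) and only the
// "(nonce >> 10) % threads === threadId" clause ever gates the post.
let oldPosts = 0;
let nonce = threadId;
for (let i = 0; i < 4096; i++) {
  const oldNonce = nonce;
  nonce += threads;
  if ((nonce > oldNonce) | 1023 && (nonce >> 10) % threads === threadId) {
    oldPosts++; // stands in for postMessage(nonce)
  }
}

// New logic: a worker-local iteration counter, and only worker 0 posts.
let newPosts = 0;
let localIterationCount = 0;
nonce = threadId;
for (let i = 0; i < 4096; i++) {
  nonce += threads;
  if (threadId == 0 && localIterationCount === 1024) {
    newPosts++; // stands in for postMessage(nonce)
    localIterationCount = 0;
  }
  localIterationCount++;
}

console.log({ oldPosts, newPosts }); // prints { oldPosts: 1024, newPosts: 3 }
```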
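Two design notes, as far as one can tell from the patch itself: only worker `0` posts, but because every worker strides its nonce by `threads`, worker `0`'s nonce is still a fair proxy for pool-wide progress (roughly one update per 1024 × threads hashes attempted). Separately, the default thread count changing from `navigator.hardwareConcurrency || 1` to `Math.max(navigator.hardwareConcurrency / 2, 1)` leaves headroom for the browser's main thread, which presumably also helps the progress messages get drained instead of piling up.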