Mirror of https://github.com/TecharoHQ/anubis.git, synced 2025-08-03 17:59:24 -04:00

Possible fix for #877

In some cases, the parallel solution finder in Anubis could leak all of its worker promises because the workers were being terminated improperly. A recursion bomb happens in the following scenario:

1. A worker sends a message indicating it found a solution to the proof-of-work challenge.
2. The `onmessage` handler for that worker calls `terminate()`.
3. Inside `terminate()`, the parent loops through all of the other workers and calls `w.terminate()` on them.
4. It's possible that terminating a worker triggers that worker's `onerror` event handler.
5. This creates a recursive loop of `onmessage` -> `terminate` -> `onerror` -> `terminate` -> `onerror`, and so on.

This infinite recursion quickly consumes all available stack space. It was never noticed in development because all of my computers have at least 64Gi of RAM provisioned to them, under the axiom that paying for more RAM is cheaper than paying in the time I spend working around not having enough. Additionally, ia32 has a smaller base stack size, so users on that architecture run into this issue much sooner than users on other CPU architectures.

The fix adds a boolean `settled` flag to prevent termination from running more than once.

Signed-off-by: Xe Iaso <me@xeiaso.net>
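For illustration, here is a minimal sketch of the failure mode described above (hypothetical names, not the actual Anubis code): without a guard, tearing down sibling workers from inside one worker's handler can re-enter the other handlers through `onerror`.

// Hypothetical sketch of the recursion bomb, not the real implementation.
const workers = []; // imagine several running Workers here
const terminateAll = () => {
  // No guard: every invocation tears down every worker again.
  workers.forEach((w) => w.terminate());
};
// Each worker is wired up like this:
//   worker.onmessage = () => terminateAll(); // a solution was found
//   worker.onerror = () => terminateAll();   // terminate() may fire this too
// so onmessage -> terminateAll -> onerror -> terminateAll -> ... recurses
// until the stack is exhausted. The `settled` flag in the fixed code below
// makes the teardown run at most once.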
143 lines · 3.6 KiB · JavaScript
export default function process(
  data,
  difficulty = 5,
  signal = null,
  progressCallback = null,
  threads = navigator.hardwareConcurrency || 1,
) {
  console.debug("fast algo");
  return new Promise((resolve, reject) => {
    let webWorkerURL = URL.createObjectURL(
      new Blob(["(", processTask(), ")()"], { type: "application/javascript" }),
    );

    const workers = [];
    let settled = false;

    const cleanup = () => {
      // Run teardown at most once. Without this guard, terminate() can fire
      // onerror on a sibling worker, which calls cleanup() again, recursing
      // until the stack is exhausted (the bug described above).
      if (settled) {
        return;
      }
      settled = true;
      workers.forEach((w) => w.terminate());
      if (signal != null) {
        signal.removeEventListener("abort", onAbort);
      }
      URL.revokeObjectURL(webWorkerURL);
    };

    const onAbort = () => {
      console.log("PoW aborted");
      cleanup();
      reject(new DOMException("Aborted", "AbortError"));
    };

    if (signal != null) {
      if (signal.aborted) {
        return onAbort();
      }
      signal.addEventListener("abort", onAbort, { once: true });
    }

    for (let i = 0; i < threads; i++) {
      let worker = new Worker(webWorkerURL);

      worker.onmessage = (event) => {
        if (typeof event.data === "number") {
          progressCallback?.(event.data);
        } else {
          cleanup();
          resolve(event.data);
        }
      };

      worker.onerror = (event) => {
        cleanup();
        reject(event);
      };

      worker.postMessage({
        data,
        difficulty,
        nonce: i,
        threads,
      });

      workers.push(worker);
    }
  });
}
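// Example usage (illustrative, not part of the original file; the challenge
// string and difficulty here are made up):
//
//   const controller = new AbortController();
//   process("challenge string", 4, controller.signal, (n) =>
//     console.log(`progress: nonce ${n}`),
//   )
//     .then((result) => console.log("solved:", result.hash, result.nonce))
//     .catch((err) => console.error(err));
//
// Calling controller.abort() rejects the promise with an AbortError and
// tears down all of the workers through the same cleanup() path.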

function processTask() {
  return function () {
    const sha256 = (text) => {
      const encoded = new TextEncoder().encode(text);
      return crypto.subtle.digest("SHA-256", encoded.buffer);
    };

    function uint8ArrayToHexString(arr) {
      return Array.from(arr)
        .map((c) => c.toString(16).padStart(2, "0"))
        .join("");
    }

    addEventListener("message", async (event) => {
      let data = event.data.data;
      let difficulty = event.data.difficulty;
      let hash;
      let nonce = event.data.nonce;
      let threads = event.data.threads;

      const threadId = nonce;

      while (true) {
        const currentHash = await sha256(data + nonce);
        const thisHash = new Uint8Array(currentHash);
        let valid = true;

        for (let j = 0; j < difficulty; j++) {
          const byteIndex = Math.floor(j / 2); // which byte we are looking at
          const nibbleIndex = j % 2; // which nibble in the byte we are looking at (0 is high, 1 is low)

          let nibble =
            (thisHash[byteIndex] >> (nibbleIndex === 0 ? 4 : 0)) & 0x0f; // Get the nibble

          if (nibble !== 0) {
            valid = false;
            break;
          }
        }
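        // Illustrative note (added): `difficulty` counts leading zero hex
        // digits, so e.g. difficulty 5 accepts only hashes beginning "00000".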

        if (valid) {
          hash = uint8ArrayToHexString(thisHash);
          console.log(hash);
          break;
        }

        const oldNonce = nonce;
        nonce += threads;

        // send a progress update every 1024 iterations. since each thread checks
        // separate values, one simple way to do this is by bit masking the
        // nonce for multiples of 1024. unfortunately, if the number of threads
        // shares a factor with 1024 (any even thread count), only some of the
        // threads will be sending the status update and they will get behind
        // the others. this is slightly more complicated but ensures an even
        // distribution between threads.
        if (
          nonce > (oldNonce | 1023) && // we've crossed a multiple of 1024
          (nonce >> 10) % threads === threadId // and it's our turn
        ) {
          postMessage(nonce);
        }
      }

      postMessage({
        hash,
        data,
        difficulty,
        nonce,
      });
    });
  }.toString();
}