anubis/web/js/proof-of-work.mjs
Xe Iaso e69fadddf1
fix(web/fast): remove event loop thrashing (#880)
Fixes #877

Continued from #879: event loop thrashing can cause stack space
exhaustion on ia32 systems. Previously this would thrash the event loop
in Firefox and Firefox-derived browsers such as Pale Moon. I suspect
that this is the ultimate root cause of the bizarre, irreproducible bugs
that Pale Moon (and maybe Cromite) users have been reporting since at
least #87 was merged.

The root cause is an invalid boolean statement:

```js
// send a progress update every 1024 iterations. since each thread checks
// separate values, one simple way to do this is by bit masking the
// nonce for multiples of 1024. unfortunately, if the number of threads
// is not prime, only some of the threads will be sending the status
// update and they will get behind the others. this is slightly more
// complicated but ensures an even distribution between threads.
if (
  (nonce > oldNonce) | 1023 && // we've wrapped past 1024
  (nonce >> 10) % threads === threadId // and it's our turn
) {
  postMessage(nonce);
}
```

The logic here looks fine but is subtly wrong, as reported in #877 by a
user in the Pale Moon community. Consider the following scenario:

`nonce` is a counter that increments by the worker count on every loop
iteration. This is intended to spread the load between CPU cores as
follows:

| Iteration | Worker ID | Nonce |
| :-------- | :-------- | :---- |
| 1         | 0         | 0     |
| 1         | 1         | 1     |
| 2         | 0         | 2     |
| 2         | 1         | 3     |

And so on.
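A minimal sketch of that striding, using the same variable names as the
worker code below:

```js
// Each worker starts at nonce = threadId and strides by the total worker
// count, so no two workers ever test the same nonce.
const threads = 2;
for (let threadId = 0; threadId < threads; threadId++) {
  let nonce = threadId;
  for (let iteration = 1; iteration <= 2; iteration++) {
    console.log({ iteration, threadId, nonce });
    nonce += threads; // skip over the nonces the other workers cover
  }
}
```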

The incorrect part is the boolean logic, specifically the bitwise or
(`|`). I think the intent was to use a logical or (`||`), but `|`
coerces the comparison to an integer and ors it with 1023, so the
left-hand clause always evaluates to a nonzero (and therefore truthy)
number. The intent of this snippet (as the comment clearly indicates) is
to make sure that the main event loop is only updated with the worker
status every 1024 iterations per worker. Instead it had the opposite
effect, causing a flood of messages to be sent from the workers to the
parent JavaScript context.
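You can see the coercion directly in any JavaScript console:

```js
// `|` coerces its operands to 32-bit integers (true -> 1, false -> 0),
// and or-ing anything with 1023 sets the low ten bits, so the result is
// never zero. Any nonzero number is truthy.
console.log(true | 1023); // 1023
console.log(false | 1023); // 1023
console.log(Boolean((5 > 3) | 1023)); // true
console.log(Boolean((3 > 5) | 1023)); // true, even though 3 > 5 is false
```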

This is bad for the event loop.

Instead, I have ripped out that statement and replaced it with a much
simpler increment-only counter that fires every 1024 iterations.
Additionally, only the first thread communicates back to the parent
context. This does mean that in theory the other workers could be ahead
of the first thread (posting a message out of a worker has a nonzero
cost), but in practice I don't think this will be anywhere near as much
of an issue as the old behaviour was.
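The core of the replacement, as it appears in the worker loop in the
file below:

```js
// Only thread 0 posts progress, and only once per 1024 of its own
// iterations; the other workers stay silent until they find a solution.
if (threadId == 0 && localIterationCount === 1024) {
  postMessage(nonce);
  localIterationCount = 0;
}
localIterationCount++;
```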

The root cause of the stack exhaustion is likely the pressure from all
of the postMessage futures piling up. Maybe the larger stack size in
64-bit environments makes this a non-issue there, or maybe the newer
hardware typical of 64-bit systems can simply process events fast enough
to keep up with the pressure.

Either way, thanks much to @wolfbeast and the Pale Moon community for
finding this. This will make Anubis faster for everyone!

Signed-off-by: Xe Iaso <xe.iaso@techaro.lol>
2025-07-21 18:05:24 -04:00

export default function process(
  data,
  difficulty = 5,
  signal = null,
  progressCallback = null,
  threads = Math.max(navigator.hardwareConcurrency / 2, 1),
) {
  console.debug("fast algo");
  return new Promise((resolve, reject) => {
    let webWorkerURL = URL.createObjectURL(
      new Blob(["(", processTask(), ")()"], { type: "application/javascript" }),
    );

    const workers = [];
    let settled = false;

    // Tear everything down exactly once: terminate the workers, detach
    // the abort listener, and release the blob URL.
    const cleanup = () => {
      if (settled) {
        return;
      }
      settled = true;
      workers.forEach((w) => w.terminate());
      if (signal != null) {
        signal.removeEventListener("abort", onAbort);
      }
      URL.revokeObjectURL(webWorkerURL);
    };

    const onAbort = () => {
      console.log("PoW aborted");
      cleanup();
      reject(new DOMException("Aborted", "AbortError"));
    };

    if (signal != null) {
      if (signal.aborted) {
        return onAbort();
      }
      signal.addEventListener("abort", onAbort, { once: true });
    }

    for (let i = 0; i < threads; i++) {
      let worker = new Worker(webWorkerURL);

      worker.onmessage = (event) => {
        if (typeof event.data === "number") {
          // Plain numbers are progress updates (the current nonce).
          progressCallback?.(event.data);
        } else {
          // Anything else is the solution object posted by a worker.
          cleanup();
          resolve(event.data);
        }
      };

      worker.onerror = (event) => {
        cleanup();
        reject(event);
      };

      // Seed each worker with its own starting nonce (its thread ID).
      worker.postMessage({
        data,
        difficulty,
        nonce: i,
        threads,
      });

      workers.push(worker);
    }
  });
}
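
// Example usage (hypothetical caller, not part of the original file): the
// promise resolves with the solving worker's result object, and the optional
// callback receives nonce progress updates roughly once per 1024 iterations.
//
//   const ctrl = new AbortController();
//   process("challenge string", 4, ctrl.signal, (nonce) => {
//     console.log(`tried about ${nonce} nonces so far`);
//   }).then(({ hash, nonce }) => {
//     console.log(`solved with hash ${hash} at nonce ${nonce}`);
//   });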
function processTask() {
  return function () {
    const sha256 = (text) => {
      const encoded = new TextEncoder().encode(text);
      return crypto.subtle.digest("SHA-256", encoded.buffer);
    };

    function uint8ArrayToHexString(arr) {
      return Array.from(arr)
        .map((c) => c.toString(16).padStart(2, "0"))
        .join("");
    }

    addEventListener("message", async (event) => {
      let data = event.data.data;
      let difficulty = event.data.difficulty;
      let hash;
      let nonce = event.data.nonce;
      let threads = event.data.threads;

      // The starting nonce doubles as this worker's thread ID.
      const threadId = nonce;
      let localIterationCount = 0;

      while (true) {
        const currentHash = await sha256(data + nonce);
        const thisHash = new Uint8Array(currentHash);
        let valid = true;

        // The hash is valid when its first `difficulty` hex digits
        // (nibbles) are all zero.
        for (let j = 0; j < difficulty; j++) {
          const byteIndex = Math.floor(j / 2); // which byte we are looking at
          const nibbleIndex = j % 2; // which nibble in the byte we are looking at (0 is high, 1 is low)

          let nibble =
            (thisHash[byteIndex] >> (nibbleIndex === 0 ? 4 : 0)) & 0x0f; // Get the nibble

          if (nibble !== 0) {
            valid = false;
            break;
          }
        }

        if (valid) {
          hash = uint8ArrayToHexString(thisHash);
          console.log(hash);
          break;
        }

        nonce += threads;

        // send a progress update every 1024 iterations so that the user can
        // be informed of the state of the challenge.
        if (threadId == 0 && localIterationCount === 1024) {
          postMessage(nonce);
          localIterationCount = 0;
        }

        localIterationCount++;
      }

      postMessage({
        hash,
        data,
        difficulty,
        nonce,
      });
    });
  }.toString();
}