mirror of https://github.com/MightyPirates/OpenComputers.git
synced 2025-09-17 03:05:30 -04:00
Critical fix for threads, small fixes for /bin/edit, /lib/process, greetings, and term.read
thread fix: significant quality update for embedded threads (threads inside threads inside threads...). Also added a "thread_exit" event; this is not yet official, and more metadata may be added to the event later.
/bin/edit: found a case where text was being trimmed when it didn't need to be.
/lib/process: protect a `..` concatenation from crashes with a `tostring`.
greetings: updated the comment in a greeting; you can no longer just remove /etc/motd without getting a stderr error on boot about the missing file (unless you also change your /etc/profile).
/lib/term: term.read() was behaving as io.read() for a tty and as io.read("*l") for a non-tty; now both use "*L".
This commit is contained in:
parent 2ee3a1a119
commit a61204aee8
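Note: the "thread_exit" event described above can be observed from user code. A minimal usage sketch (assumes an OpenOS shell; the event is not yet official and carries no metadata in this commit):

    local thread = require("thread")
    local event = require("event")

    local t = thread.create(function()
      os.sleep(1) -- simulated work
    end)

    -- pushed by the thread library when a thread's coroutine stack winds down
    event.pull("thread_exit")
    print(t:status()) --> dead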
@@ -633,9 +633,6 @@ do
   local x, y, w, h = getArea()
   local chars = 0
   for line in f:lines() do
-    if line:sub(-1) == "\r" then
-      line = line:sub(1, -2)
-    end
     table.insert(buffer, line)
     chars = chars + unicode.len(line)
     if #buffer <= h then
@@ -68,7 +68,7 @@ function process.load(path, env, init, name)
     -- msg can be a custom error object
     if type(msg) == "table" then
       if msg.reason ~= "terminated" then
-        io.stderr:write(msg.reason.."\n")
+        io.stderr:write(tostring(msg.reason), "\n")
       end
       return msg.code or 0
     end
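Note: the tostring guard matters because msg.reason is not guaranteed to be a string; concatenating a non-string with `..` raises a second error that masks the original one. A plain-Lua sketch of the failure mode (values are illustrative):

    local msg = {reason = {detail = "not a string"}, code = 1}
    -- io.stderr:write(msg.reason .. "\n")      -- raises: attempt to concatenate a table value
    io.stderr:write(tostring(msg.reason), "\n") -- prints something like "table: 0x2b9c..." instead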
@@ -181,7 +181,7 @@ end
 
 function term.read(history, dobreak, hint, pwchar, filter)
   if not io.stdin.tty then
-    return io.read()
+    return io.read("*L")
   end
   history = history or {}
   local handler = history
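Note: "*l" strips the trailing newline while "*L" keeps it, which is what makes the tty and non-tty paths agree. A stock-Lua sketch using a temporary file:

    local path = os.tmpname()
    local f = assert(io.open(path, "w"))
    f:write("hello\n")
    f:close()

    f = assert(io.open(path, "r"))
    print(#f:read("*l")) --> 5, "hello" with the newline stripped
    f:close()

    f = assert(io.open(path, "r"))
    print(#f:read("*L")) --> 6, "hello\n" with the newline kept
    f:close()
    os.remove(path)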
@@ -5,6 +5,46 @@ local computer = require("computer")
 
 local thread = {}
 
+do
+  local handlers = event.handlers
+  local handlers_mt = getmetatable(handlers)
+  -- the event library sets a metatable on handlers, but we set threaded=true
+  if not handlers_mt.threaded then
+    -- find the root process
+    local root_data
+    for _,p in pairs(process.list) do
+      if not p.parent then
+        root_data = p.data
+        break
+      end
+    end
+    assert(root_data, "thread library panic: no root proc")
+    handlers_mt.threaded = true
+    -- if we don't separate root handlers from thread handlers we see double dispatch
+    -- because the thread calls dispatch on pull as well
+    root_data.handlers = {} -- root handlers
+    root_data.pull = handlers_mt.__call -- the real computer.pullSignal
+    while true do
+      local key, value = next(handlers)
+      if not key then break end
+      root_data.handlers[key] = value
+      handlers[key] = nil
+    end
+    handlers_mt.__index = function(_, key)
+      return process.info().data.handlers[key]
+    end
+    handlers_mt.__newindex = function(_, key, value)
+      process.info().data.handlers[key] = value
+    end
+    handlers_mt.__pairs = function(_, ...)
+      return pairs(process.info().data.handlers, ...)
+    end
+    handlers_mt.__call = function(tbl, ...)
+      return process.info().data.pull(tbl, ...)
+    end
+  end
+end
+
 local function waitForDeath(threads, timeout, all)
   checkArg(1, threads, "table")
   checkArg(2, timeout, "number", "nil")
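Note: the metatable above makes the single shared event.handlers table behave as a per-process view, so each thread dispatches only its own handlers. A stripped-down, plain-Lua sketch of the same redirection trick (names are illustrative, not the OpenOS ones):

    local stores = {root = {}, child = {}}
    local active = "root"
    local function current() return stores[active] end

    local handlers = setmetatable({}, {
      __index = function(_, key) return current()[key] end,
      __newindex = function(_, key, value) current()[key] = value end,
    })

    handlers.key_down = "root handler"
    active = "child"
    handlers.key_down = "child handler"
    print(stores.root.key_down)  --> root handler
    print(stores.child.key_down) --> child handler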
@@ -15,7 +55,7 @@ local function waitForDeath(threads, timeout, all)
   local deadline = computer.uptime() + timeout
   while deadline > computer.uptime() do
     local dieing = {}
-    local living = {}
+    local living = false
     for _,t in ipairs(threads) do
       local result = t.process and t.process.data.result
       local proc_ok = type(result) ~= "table" or result[1]
@@ -25,18 +65,18 @@ local function waitForDeath(threads, timeout, all)
         dieing[#dieing + 1] = t
         mortician[t] = true
       else
-        living[#living + 1] = t
+        living = true
       end
     end
 
-    if all and #living == 0 or not all and #dieing > 0 then
+    if all and not living or not all and #dieing > 0 then
       timed_out = false
       break
     end
 
     -- resume each non dead thread
     -- we KNOW all threads are event.pull blocked
-    event.pull()
+    event.pull(deadline - computer.uptime())
   end
 
   for t in pairs(mortician) do
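Note: with the remaining deadline now forwarded to event.pull, a wait can actually time out on a quiet system instead of blocking indefinitely. A usage sketch (assumes an OpenOS shell; timings are arbitrary):

    local thread = require("thread")
    local t1 = thread.create(function() os.sleep(1) end)
    local t2 = thread.create(function() os.sleep(2) end)
    thread.waitForAll({t1, t2}, 5) -- returns when both die, or after ~5 seconds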
@@ -58,8 +98,7 @@ function thread.waitForAll(threads, timeout)
 end
 
 local box_thread = {}
-local box_thread_handle = {}
-box_thread_handle.close = thread.waitForAll
+local box_thread_handle = {close = thread.waitForAll}
 
 local function get_box_thread_handle(handles, bCreate)
   for _,next_handle in ipairs(handles) do
@@ -94,7 +133,6 @@ function box_thread:status()
 end
 
 function box_thread:join(timeout)
-  self:detach()
   return box_thread_handle.close({self}, timeout)
 end
 
@@ -118,6 +156,7 @@ function box_thread:detach()
   end
   for i,h in ipairs(btHandle) do
     if h == self then
+      self.process = nil
       return table.remove(btHandle, i)
     end
   end
@@ -144,8 +183,7 @@ function thread.create(fp, ...)
 
   local t = setmetatable({}, {__status="suspended",__index=box_thread})
   t.pco = pipes.createCoroutineStack(function(...)
-    local mt = getmetatable(t)
-    mt.__status = "running"
+    getmetatable(t).__status = "running"
     local fp_co = t.pco.create(fp)
     -- run fp_co until dead
     -- pullSignal will yield_all past this point
@@ -154,45 +192,43 @@ function thread.create(fp, ...)
     while true do
       local result = table.pack(t.pco.resume(fp_co, table.unpack(args, 1, args.n)))
       if t.pco.status(fp_co) == "dead" then
+        -- this error handling is VERY much like process.lua
+        -- maybe one day it'll merge
         if not result[1] then
-          event.onError(string.format("thread crashed: %s", tostring(result[2])))
+          local exit_code
+          local msg = result[2]
+          -- msg can be a custom error object
+          local reason = "crashed"
+          if type(msg) == "table" then
+            if type(msg.reason) == "string" then
+              reason = msg.reason
+            end
+            exit_code = tonumber(msg.code)
+          elseif type(msg) == "string" then
+            reason = msg
+          end
+          if not exit_code then
+            pcall(event.onError, string.format("[thread] %s", reason))
+            exit_code = 1
+          end
+          os.exit(exit_code)
         end
         break
       end
       args = table.pack(event.pull(table.unpack(result, 2, result.n)))
     end
-    mt.__status = "dead"
-    event.push("thread_exit")
-    t:detach()
   end)
-  local handlers = event.handlers
-  local handlers_mt = getmetatable(handlers)
-  -- the event library sets a metatable on handlers
-  -- but not a pull field
-  if not handlers_mt.pull then
-    -- if we don't separate root handlers from thread handlers we see double dispatch
-    -- because the thread calls dispatch on pull as well
-    handlers_mt.handlers = {} -- root handlers
-    handlers_mt.pull = handlers_mt.__call -- the real computer.pullSignal
-    handlers_mt.current = function(field) return process.info().data[field] or handlers_mt[field] end
-    while true do
-      local key, value = next(handlers)
-      if not key then break end
-      handlers_mt.handlers[key] = value
-      handlers[key] = nil
-    end
-    handlers_mt.__index = function(_, key)
-      return handlers_mt.current("handlers")[key]
-    end
-    handlers_mt.__newindex = function(_, key, value)
-      handlers_mt.current("handlers")[key] = value
-    end
-    handlers_mt.__pairs = function(_, ...)
-      return pairs(handlers_mt.current("handlers"), ...)
-    end
-    handlers_mt.__call = function(tbl, ...)
-      return handlers_mt.current("pull")(tbl, ...)
-    end
-  end
+
+  --special resume to keep track of process death
+  local function private_resume(...)
+    local result = table.pack(t.pco.resume_all(...))
+    if #t.pco.stack == 0 then
+      t:detach()
+      local mt = getmetatable(t)
+      mt.__status = "dead"
+      event.push("thread_exit")
+    end
+    return table.unpack(result, 1, result.n)
+  end
 
   local data = process.info(t.pco.stack[1]).data
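Note: the new handler distinguishes plain string errors from table error objects carrying `reason` and `code` fields. A sketch of both paths (assumes an OpenOS shell; values are illustrative):

    local thread = require("thread")

    -- a string error is reported via event.onError as "[thread] ..." and exits with code 1
    thread.create(function() error("boom") end)

    -- a table error object with a numeric code suppresses the report;
    -- the thread simply exits with that code
    thread.create(function() error({reason = "terminated", code = 0}) end)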
@@ -201,10 +237,10 @@ function thread.create(fp, ...)
   -- register a timeout handler
   -- hack so that event.register sees the root handlers
   local data_handlers = data.handlers
-  data.handlers = handlers_mt.handlers
+  data.handlers = process.info(2).data.handlers
   event.register(
     nil, -- nil key matches anything, timers use false keys
-    t.pco.resume_all,
+    private_resume,
     timeout, -- wait for the time specified by the caller
     1) -- we only want this thread to wake up once
   data.handlers = data_handlers
@@ -215,12 +251,12 @@ function thread.create(fp, ...)
     repeat
       event_data = table.pack(t.pco.yield_all(timeout))
       -- during sleep, we may have been suspended
-    until getmetatable(t).__status ~= "suspended"
+    until t:status() ~= "suspended"
     return table.unpack(event_data, 1, event_data.n)
   end
 
   t:attach()
-  t.pco.resume_all(...) -- threads start out running
+  private_resume(...) -- threads start out running
 
   return t
 end
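Note: with death now tracked in private_resume, t:status() is the reliable way to observe a thread's lifecycle. A short sketch (assumes an OpenOS shell and the suspend/resume/kill methods of the same box_thread API):

    local thread = require("thread")
    local t = thread.create(function()
      while true do os.sleep(0.5) end
    end)
    print(t:status()) --> running
    t:suspend()
    print(t:status()) --> suspended
    t:resume()
    t:kill()
    print(t:status()) --> dead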
@@ -22,7 +22,7 @@ Many component methods have a short documentation - use `=component.componentName.methodName` to display it.
 You can get a list of all attached components using the `components` program.
 If you encounter out of memory errors, throw more RAM at your computer.
 Have you tried turning it off and on again?
-To disable this greeting, install OpenOS to a writeable medium and delete `/etc/motd`.
+To disable this greeting, install OpenOS to a writeable medium and remove the `/etc/motd` line from `/etc/profile`.
 Did you know OpenComputers has a forum? No? Well, it's at https://oc.cil.li/.
 Please report bugs on the Github issue tracker, thank you!
 Beware of cycles when building networks, or you may get duplicate messages!