Critical fix for threads, small fixes for /bin/edit, /lib/process, greetings, and term.read

thread fix:
Significant quality update for embedded threads (threads inside threads inside threads...). Also added a "thread_exit" event. This is not yet official; I may add more metadata to the event later.
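For illustration only (not part of this commit): a minimal sketch of the nested-thread case and the new event, using the OpenOS thread and event libraries.

  local thread = require("thread")
  local event = require("event")

  local outer = thread.create(function()
    -- a thread created inside another thread, the "embedded" case
    local inner = thread.create(function()
      os.sleep(1)
    end)
    inner:join()
  end)

  event.pull("thread_exit") -- pushed when a thread finishes
  outer:join()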

/bin/edit:
Found a case where text was being trimmed when it doesn't need to be: on load, the editor stripped a trailing "\r" from loaded lines.
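For reference, the removed check (visible in the /bin/edit diff below) dropped the final character of any loaded line that ends in a carriage return; a plain-Lua illustration of that behavior:

  local line = "some text\r"
  if line:sub(-1) == "\r" then -- the check edit no longer performs on load
    line = line:sub(1, -2)
  end
  assert(line == "some text") -- the trailing "\r" has been trimmed away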

/lib/process:
Protect a `..` concatenation from crashing by passing the error reason through tostring first; msg.reason is not guaranteed to be a string.
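A plain-Lua illustration of the failure mode, with made-up values: `..` raises an error on non-string, non-number operands, while tostring always yields a string.

  local msg = { reason = { code = 1 } } -- reason happens not to be a string here
  local ok, err = pcall(function() return msg.reason .. "\n" end)
  print(ok, err) --> false, "attempt to concatenate a table value ..."
  io.stderr:write(tostring(msg.reason), "\n") -- safe, prints something like "table: 0x..."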

greetings:
Updated the wording of one greeting: you can no longer simply delete /etc/motd, because boot then prints a stderr error about the missing file (unless you also change your /etc/profile).

/lib/term:
term.read() already kept the trailing newline on a tty (io.read("*L") behavior), but for non-tty input it fell back to plain io.read(), which reads "*l" and strips the newline; now both paths use "*L".
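The two formats differ only in the trailing newline; a plain-Lua sketch (io.tmpfile is standard Lua and may not exist on OpenOS, it only stands in for any input stream):

  local f = io.tmpfile()
  f:write("hello\n")
  f:seek("set")
  print(#f:read("*l")) --> 5, "*l" strips the newline
  f:seek("set")
  print(#f:read("*L")) --> 6, "*L" keeps "hello\n" intact
  f:close()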
payonel 2017-06-15 10:40:11 -07:00
parent 2ee3a1a119
commit a61204aee8
5 changed files with 83 additions and 50 deletions

/bin/edit

@@ -633,9 +633,6 @@ do
   local x, y, w, h = getArea()
   local chars = 0
   for line in f:lines() do
-    if line:sub(-1) == "\r" then
-      line = line:sub(1, -2)
-    end
     table.insert(buffer, line)
     chars = chars + unicode.len(line)
     if #buffer <= h then

/lib/process

@@ -68,7 +68,7 @@ function process.load(path, env, init, name)
     -- msg can be a custom error object
     if type(msg) == "table" then
       if msg.reason ~= "terminated" then
-        io.stderr:write(msg.reason.."\n")
+        io.stderr:write(tostring(msg.reason), "\n")
       end
       return msg.code or 0
     end

/lib/term

@@ -181,7 +181,7 @@ end
 function term.read(history, dobreak, hint, pwchar, filter)
   if not io.stdin.tty then
-    return io.read()
+    return io.read("*L")
   end
   history = history or {}
   local handler = history

/lib/thread

@@ -5,6 +5,46 @@ local computer = require("computer")
 local thread = {}
+do
+  local handlers = event.handlers
+  local handlers_mt = getmetatable(handlers)
+  -- the event library sets a metatable on handlers, but we set threaded=true
+  if not handlers_mt.threaded then
+    -- find the root process
+    local root_data
+    for _,p in pairs(process.list) do
+      if not p.parent then
+        root_data = p.data
+        break
+      end
+    end
+    assert(root_data, "thread library panic: no root proc")
+    handlers_mt.threaded = true
+    -- if we don't separate root handlers from thread handlers we see double dispatch
+    -- because the thread calls dispatch on pull as well
+    root_data.handlers = {} -- root handlers
+    root_data.pull = handlers_mt.__call -- the real computer.pullSignal
+    while true do
+      local key, value = next(handlers)
+      if not key then break end
+      root_data.handlers[key] = value
+      handlers[key] = nil
+    end
+    handlers_mt.__index = function(_, key)
+      return process.info().data.handlers[key]
+    end
+    handlers_mt.__newindex = function(_, key, value)
+      process.info().data.handlers[key] = value
+    end
+    handlers_mt.__pairs = function(_, ...)
+      return pairs(process.info().data.handlers, ...)
+    end
+    handlers_mt.__call = function(tbl, ...)
+      return process.info().data.pull(tbl, ...)
+    end
+  end
+end
 local function waitForDeath(threads, timeout, all)
   checkArg(1, threads, "table")
   checkArg(2, timeout, "number", "nil")
@@ -15,7 +55,7 @@ local function waitForDeath(threads, timeout, all)
   local deadline = computer.uptime() + timeout
   while deadline > computer.uptime() do
     local dieing = {}
-    local living = {}
+    local living = false
     for _,t in ipairs(threads) do
       local result = t.process and t.process.data.result
       local proc_ok = type(result) ~= "table" or result[1]
@@ -25,18 +65,18 @@ local function waitForDeath(threads, timeout, all)
         dieing[#dieing + 1] = t
         mortician[t] = true
       else
-        living[#living + 1] = t
+        living = true
       end
     end
-    if all and #living == 0 or not all and #dieing > 0 then
+    if all and not living or not all and #dieing > 0 then
       timed_out = false
       break
     end
     -- resume each non dead thread
     -- we KNOW all threads are event.pull blocked
-    event.pull()
+    event.pull(deadline - computer.uptime())
   end
   for t in pairs(mortician) do
@@ -58,8 +98,7 @@ function thread.waitForAll(threads, timeout)
 end
 local box_thread = {}
-local box_thread_handle = {}
-box_thread_handle.close = thread.waitForAll
+local box_thread_handle = {close = thread.waitForAll}
 local function get_box_thread_handle(handles, bCreate)
   for _,next_handle in ipairs(handles) do
@@ -94,7 +133,6 @@ function box_thread:status()
 end
 function box_thread:join(timeout)
-  self:detach()
   return box_thread_handle.close({self}, timeout)
 end
@@ -118,6 +156,7 @@ function box_thread:detach()
   end
   for i,h in ipairs(btHandle) do
     if h == self then
+      self.process = nil
       return table.remove(btHandle, i)
     end
   end
@@ -144,8 +183,7 @@ function thread.create(fp, ...)
   local t = setmetatable({}, {__status="suspended",__index=box_thread})
   t.pco = pipes.createCoroutineStack(function(...)
-    local mt = getmetatable(t)
-    mt.__status = "running"
+    getmetatable(t).__status = "running"
     local fp_co = t.pco.create(fp)
     -- run fp_co until dead
     -- pullSignal will yield_all past this point
@@ -154,45 +192,43 @@ function thread.create(fp, ...)
     while true do
       local result = table.pack(t.pco.resume(fp_co, table.unpack(args, 1, args.n)))
       if t.pco.status(fp_co) == "dead" then
+        -- this error handling is VERY much like process.lua
+        -- maybe one day it'll merge
        if not result[1] then
-          event.onError(string.format("thread crashed: %s", tostring(result[2])))
+          local exit_code
+          local msg = result[2]
+          -- msg can be a custom error object
+          local reason = "crashed"
+          if type(msg) == "table" then
+            if type(msg.reason) == "string" then
+              reason = msg.reason
+            end
+            exit_code = tonumber(msg.code)
+          elseif type(msg) == "string" then
+            reason = msg
+          end
+          if not exit_code then
+            pcall(event.onError, string.format("[thread] %s", reason))
+            exit_code = 1
+          end
+          os.exit(exit_code)
         end
         break
       end
       args = table.pack(event.pull(table.unpack(result, 2, result.n)))
     end
-    mt.__status = "dead"
-    event.push("thread_exit")
-    t:detach()
   end)
-  local handlers = event.handlers
-  local handlers_mt = getmetatable(handlers)
-  -- the event library sets a metatable on handlers
-  -- but not a pull field
-  if not handlers_mt.pull then
-    -- if we don't separate root handlers from thread handlers we see double dispatch
-    -- because the thread calls dispatch on pull as well
-    handlers_mt.handlers = {} -- root handlers
-    handlers_mt.pull = handlers_mt.__call -- the real computer.pullSignal
-    handlers_mt.current = function(field) return process.info().data[field] or handlers_mt[field] end
-    while true do
-      local key, value = next(handlers)
-      if not key then break end
-      handlers_mt.handlers[key] = value
-      handlers[key] = nil
-    end
-    handlers_mt.__index = function(_, key)
-      return handlers_mt.current("handlers")[key]
-    end
-    handlers_mt.__newindex = function(_, key, value)
-      handlers_mt.current("handlers")[key] = value
-    end
-    handlers_mt.__pairs = function(_, ...)
-      return pairs(handlers_mt.current("handlers"), ...)
-    end
-    handlers_mt.__call = function(tbl, ...)
-      return handlers_mt.current("pull")(tbl, ...)
-    end
-  end
+  --special resume to keep track of process death
+  local function private_resume(...)
+    local result = table.pack(t.pco.resume_all(...))
+    if #t.pco.stack == 0 then
+      t:detach()
+      local mt = getmetatable(t)
+      mt.__status = "dead"
+      event.push("thread_exit")
+    end
+    return table.unpack(result, 1, result.n)
+  end
   local data = process.info(t.pco.stack[1]).data
@@ -201,10 +237,10 @@ function thread.create(fp, ...)
     -- register a timeout handler
     -- hack so that event.register sees the root handlers
     local data_handlers = data.handlers
-    data.handlers = handlers_mt.handlers
+    data.handlers = process.info(2).data.handlers
     event.register(
       nil, -- nil key matches anything, timers use false keys
-      t.pco.resume_all,
+      private_resume,
       timeout, -- wait for the time specified by the caller
       1) -- we only want this thread to wake up once
     data.handlers = data_handlers
@@ -215,12 +251,12 @@ function thread.create(fp, ...)
     repeat
       event_data = table.pack(t.pco.yield_all(timeout))
      -- during sleep, we may have been suspended
-    until getmetatable(t).__status ~= "suspended"
+    until t:status() ~= "suspended"
     return table.unpack(event_data, 1, event_data.n)
   end
   t:attach()
-  t.pco.resume_all(...) -- threads start out running
+  private_resume(...) -- threads start out running
   return t
 end

greetings

@@ -22,7 +22,7 @@ Many component methods have a short documentation - use `=component.componentNam
 You can get a list of all attached components using the `components` program.
 If you encounter out of memory errors, throw more RAM at your computer.
 Have you tried turning it off and on again?
-To disable this greeting, install OpenOS to a writeable medium and delete `/etc/motd`.
+To disable this greeting, install OpenOS to a writeable medium and remove the `/etc/motd` line from `/etc/profile`.
 Did you know OpenComputers has a forum? No? Well, it's at https://oc.cil.li/.
 Please report bugs on the Github issue tracker, thank you!
 Beware of cycles when building networks, or you may get duplicate messages!