From 5383b641e7500c35a785a6529f67238e35038619 Mon Sep 17 00:00:00 2001 From: Nexarius Date: Thu, 13 Apr 2017 16:40:45 +0200 Subject: [PATCH 1/7] Update robot.names --- src/main/resources/assets/opencomputers/robot.names | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/main/resources/assets/opencomputers/robot.names b/src/main/resources/assets/opencomputers/robot.names index fd43fa689..27047dffa 100644 --- a/src/main/resources/assets/opencomputers/robot.names +++ b/src/main/resources/assets/opencomputers/robot.names @@ -61,7 +61,7 @@ Hex # Discworld Homunk # Perry Rhodan *Hyun-ae # Analogue: A Hate Story / Hate Plus Icarus # Deus Ex -J.A.R.V.I.S # Iron Man +J.A.R.V.I.S. # Iron Man Johnny 5 # Short Circuit LizzyTrickster # Contributor K-9 # Doctor Who @@ -91,6 +91,7 @@ Shakey # The first general-purpose mobile robot that could reason ab Skynet # Terminator Space Core # Portal SpiritedDusty # Contributor +T-800 # Terminator T-1000 # Terminator Tachikoma # Ghost in the Shell TARA UH # Perry Rhodan @@ -100,7 +101,7 @@ Uniblab # The Jetsons Unimate # First programmable robot. Vertigo # Perry Rhodan Vexatos # Contributor -V.I.K.I # Virtual Interactive Kinetic Intelligence - I, Robot +V.I.K.I. 
# Virtual Interactive Kinetic Intelligence - I, Robot Wall-E # Wall-E Watson # IBM Watson Weebo # Flubber From 1ce5e06df2171e12e4a078f72e7a025132323356 Mon Sep 17 00:00:00 2001 From: payonel Date: Sat, 27 May 2017 10:42:30 -0700 Subject: [PATCH 2/7] send drop even without drag --- src/main/scala/li/cil/oc/client/gui/Screen.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/main/scala/li/cil/oc/client/gui/Screen.scala b/src/main/scala/li/cil/oc/client/gui/Screen.scala index 93296f6c2..a568f68dc 100644 --- a/src/main/scala/li/cil/oc/client/gui/Screen.scala +++ b/src/main/scala/li/cil/oc/client/gui/Screen.scala @@ -16,7 +16,7 @@ class Screen(val buffer: api.internal.TextBuffer, val hasMouse: Boolean, val has private val bufferMargin = BufferRenderer.margin + BufferRenderer.innerMargin - private var didDrag = false + private var didClick = false private var x, y = 0 @@ -57,13 +57,13 @@ class Screen(val buffer: api.internal.TextBuffer, val hasMouse: Boolean, val has protected override def mouseMovedOrUp(mouseX: Int, mouseY: Int, button: Int) { super.mouseMovedOrUp(mouseX, mouseY, button) if (hasMouse && button >= 0) { - if (didDrag) { + if (didClick) { toBufferCoordinates(mouseX, mouseY) match { case Some((bx, by)) => buffer.mouseUp(bx, by, button, null) case _ => buffer.mouseUp(-1.0, -1.0, button, null) } } - didDrag = false + didClick = false mx = -1 my = -1 } @@ -74,7 +74,7 @@ class Screen(val buffer: api.internal.TextBuffer, val hasMouse: Boolean, val has case Some((bx, by)) if bx.toInt != mx || (by*2).toInt != my => if (mx >= 0 && my >= 0) buffer.mouseDrag(bx, by, button, null) else buffer.mouseDown(bx, by, button, null) - didDrag = mx >= 0 && my >= 0 + didClick = true mx = bx.toInt my = (by*2).toInt // for high precision mode, sends some unnecessary packets when not using it, but eh case _ => From 2ee3a1a1193606ce5c05488d3ce110a10efb19b9 Mon Sep 17 00:00:00 2001 From: Vexatos Date: Wed, 24 May 2017 20:38:30 +0200 Subject: [PATCH 
3/7] Made tank controllers work better with blocks that provide multiple tanks. --- .../component/traits/TankWorldControl.scala | 8 +++-- .../component/traits/WorldTankAnalytics.scala | 35 ++++++++++++++----- .../li/cil/oc/util/ExtendedArguments.scala | 15 ++++++++ 3 files changed, 47 insertions(+), 11 deletions(-) diff --git a/src/main/scala/li/cil/oc/server/component/traits/TankWorldControl.scala b/src/main/scala/li/cil/oc/server/component/traits/TankWorldControl.scala index ee6254298..1bc24c019 100644 --- a/src/main/scala/li/cil/oc/server/component/traits/TankWorldControl.scala +++ b/src/main/scala/li/cil/oc/server/component/traits/TankWorldControl.scala @@ -7,15 +7,19 @@ import li.cil.oc.util.ExtendedArguments._ import li.cil.oc.util.FluidUtils import li.cil.oc.util.ResultWrapper.result import net.minecraftforge.fluids.FluidStack +import net.minecraftforge.fluids.FluidTankInfo trait TankWorldControl extends TankAware with WorldAware with SideRestricted { - @Callback(doc = "function(side:number):boolean -- Compare the fluid in the selected tank with the fluid on the specified side. Returns true if equal.") + @Callback(doc = "function(side:number [, tank:number]):boolean -- Compare the fluid in the selected tank with the fluid in the specified tank on the specified side. 
Returns true if equal.") def compareFluid(context: Context, args: Arguments): Array[AnyRef] = { val side = checkSideForAction(args, 0) fluidInTank(selectedTank) match { case Some(stack) => FluidUtils.fluidHandlerAt(position.offset(side)) match { - case Some(handler) => result(Option(handler.getTankInfo(side.getOpposite)).exists(_.exists(other => stack.isFluidEqual(other.fluid)))) + case Some(handler) => args.optTankInfo(handler, side.getOpposite, 1, null) match { + case info: FluidTankInfo => result(stack.isFluidEqual(info.fluid)) + case _ => result(Option(handler.getTankInfo(side.getOpposite)).exists(_.exists(other => stack.isFluidEqual(other.fluid)))) + } case _ => result(false) } case _ => result(false) diff --git a/src/main/scala/li/cil/oc/server/component/traits/WorldTankAnalytics.scala b/src/main/scala/li/cil/oc/server/component/traits/WorldTankAnalytics.scala index ec7950223..7a8a9ccbc 100644 --- a/src/main/scala/li/cil/oc/server/component/traits/WorldTankAnalytics.scala +++ b/src/main/scala/li/cil/oc/server/component/traits/WorldTankAnalytics.scala @@ -5,38 +5,55 @@ import li.cil.oc.api.machine.Arguments import li.cil.oc.api.machine.Callback import li.cil.oc.api.machine.Context import li.cil.oc.server.component.result +import li.cil.oc.util.ExtendedArguments._ import li.cil.oc.util.FluidUtils +import net.minecraftforge.fluids.FluidTankInfo trait WorldTankAnalytics extends WorldAware with SideRestricted { - @Callback(doc = """function(side:number):number -- Get the amount of fluid in the tank on the specified side.""") + @Callback(doc = """function(side:number [, tank:number]):number -- Get the amount of fluid in the specified tank on the specified side.""") def getTankLevel(context: Context, args: Arguments): Array[AnyRef] = { val facing = checkSideForAction(args, 0) FluidUtils.fluidHandlerAt(position.offset(facing)) match { - case Some(handler) => - result(handler.getTankInfo(facing.getOpposite).map(info => Option(info.fluid).fold(0)(_.amount)).sum) + case 
Some(handler) => args.optTankInfo(handler, facing.getOpposite, 1, null) match { + case info: FluidTankInfo => result(Option(info.fluid).fold(0)(_.amount)) + case _ => result(handler.getTankInfo(facing.getOpposite).map(info => Option(info.fluid).fold(0)(_.amount)).sum) + } case _ => result(Unit, "no tank") } } - @Callback(doc = """function(side:number):number -- Get the capacity of the tank on the specified side.""") + @Callback(doc = """function(side:number [, tank:number]):number -- Get the capacity of the specified tank on the specified side.""") def getTankCapacity(context: Context, args: Arguments): Array[AnyRef] = { val facing = checkSideForAction(args, 0) FluidUtils.fluidHandlerAt(position.offset(facing)) match { - case Some(handler) => - result(handler.getTankInfo(facing.getOpposite).map(_.capacity).foldLeft(0)((max, capacity) => math.max(max, capacity))) + case Some(handler) => args.optTankInfo(handler, facing.getOpposite, 1, null) match { + case info: FluidTankInfo => result(info.capacity) + case _ => result(handler.getTankInfo(facing.getOpposite).map(_.capacity).foldLeft(0)((max, capacity) => math.max(max, capacity))) + } case _ => result(Unit, "no tank") } } - @Callback(doc = """function(side:number):table -- Get a description of the fluid in the the tank on the specified side.""") + @Callback(doc = """function(side:number [, tank:number]):table -- Get a description of the fluid in the the specified tank on the specified side.""") def getFluidInTank(context: Context, args: Arguments): Array[AnyRef] = if (Settings.get.allowItemStackInspection) { val facing = checkSideForAction(args, 0) FluidUtils.fluidHandlerAt(position.offset(facing)) match { - case Some(handler) => - result(handler.getTankInfo(facing.getOpposite)) + case Some(handler) => args.optTankInfo(handler, facing.getOpposite, 1, null) match { + case info: FluidTankInfo => result(info) + case _ => result(handler.getTankInfo(facing.getOpposite)) + } case _ => result(Unit, "no tank") } } else 
result(Unit, "not enabled in config") + + @Callback(doc = """function(side:number):number -- Get the number of tanks available on the specified side.""") + def getTankCount(context: Context, args: Arguments): Array[AnyRef] = { + val facing = checkSideForAction(args, 0) + FluidUtils.fluidHandlerAt(position.offset(facing)) match { + case Some(handler) => result(handler.getTankInfo(facing.getOpposite).length) + case _ => result(Unit, "no tank") + } + } } diff --git a/src/main/scala/li/cil/oc/util/ExtendedArguments.scala b/src/main/scala/li/cil/oc/util/ExtendedArguments.scala index c8bc71ff4..b66da3919 100644 --- a/src/main/scala/li/cil/oc/util/ExtendedArguments.scala +++ b/src/main/scala/li/cil/oc/util/ExtendedArguments.scala @@ -5,6 +5,8 @@ import li.cil.oc.api.machine.Arguments import net.minecraft.inventory.IInventory import net.minecraftforge.common.util.ForgeDirection import net.minecraftforge.fluids.FluidContainerRegistry +import net.minecraftforge.fluids.FluidTankInfo +import net.minecraftforge.fluids.IFluidHandler import scala.language.implicitConversions @@ -42,6 +44,19 @@ object ExtendedArguments { tank } + def checkTankInfo(handler: IFluidHandler, side: ForgeDirection, n: Int) = { + val tank = args.checkInteger(n) - 1 + if (tank < 0 || tank >= handler.getTankInfo(side).length) { + throw new IllegalArgumentException("invalid tank index") + } + handler.getTankInfo(side)(tank) + } + + def optTankInfo(handler: IFluidHandler, side: ForgeDirection, n: Int, default: FluidTankInfo) = { + if (!isDefined(n)) default + else checkTankInfo(handler, side, n) + } + def checkSideAny(index: Int) = checkSide(index, ForgeDirection.VALID_DIRECTIONS: _*) def optSideAny(index: Int, default: ForgeDirection) = From a61204aee8be2710e2ee2a072beb56c69779bdb7 Mon Sep 17 00:00:00 2001 From: payonel Date: Thu, 15 Jun 2017 10:40:11 -0700 Subject: [PATCH 4/7] Critical fix for threads, small fixes for /bin/edit, /lib/process, greetings, and term.read thread fix: Significant quality update 
for embedded threads (threads inside threads inside threads...) Also, added a "thread_exit" event. This is not yet official, I may add more meta data to the event later. /bin/edit: Found a case where text was being trimmed where it doesn't need to be /lib/process protect a .. operator from crashes with a tostring greetings updated the comment in a greeting, you can no longer just remove /etc/motd without a stderr error about the file missing on boot (without changing your /etc/profile) /lib/term term.read() was behaving as io.read() for tty, and as io.read("*l") for non-tty, now they both use *L --- .../opencomputers/loot/openos/bin/edit.lua | 3 - .../opencomputers/loot/openos/lib/process.lua | 2 +- .../opencomputers/loot/openos/lib/term.lua | 2 +- .../opencomputers/loot/openos/lib/thread.lua | 124 +++++++++++------- .../loot/openos/usr/misc/greetings.txt | 2 +- 5 files changed, 83 insertions(+), 50 deletions(-) diff --git a/src/main/resources/assets/opencomputers/loot/openos/bin/edit.lua b/src/main/resources/assets/opencomputers/loot/openos/bin/edit.lua index 85362f18e..3fae5aeef 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/bin/edit.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/bin/edit.lua @@ -633,9 +633,6 @@ do local x, y, w, h = getArea() local chars = 0 for line in f:lines() do - if line:sub(-1) == "\r" then - line = line:sub(1, -2) - end table.insert(buffer, line) chars = chars + unicode.len(line) if #buffer <= h then diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua index fdeecb63e..8b064097f 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua @@ -68,7 +68,7 @@ function process.load(path, env, init, name) -- msg can be a custom error object if type(msg) == "table" then if msg.reason ~= "terminated" then - 
io.stderr:write(msg.reason.."\n") + io.stderr:write(tostring(msg.reason), "\n") end return msg.code or 0 end diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua index 7a00c291b..3cac78a56 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua @@ -181,7 +181,7 @@ end function term.read(history, dobreak, hint, pwchar, filter) if not io.stdin.tty then - return io.read() + return io.read("*L") end history = history or {} local handler = history diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua index 8366243f2..f3234a6af 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua @@ -5,6 +5,46 @@ local computer = require("computer") local thread = {} +do + local handlers = event.handlers + local handlers_mt = getmetatable(handlers) + -- the event library sets a metatable on handlers, but we set threaded=true + if not handlers_mt.threaded then + -- find the root process + local root_data + for _,p in pairs(process.list) do + if not p.parent then + root_data = p.data + break + end + end + assert(root_data, "thread library panic: no root proc") + handlers_mt.threaded = true + -- if we don't separate root handlers from thread handlers we see double dispatch + -- because the thread calls dispatch on pull as well + root_data.handlers = {} -- root handlers + root_data.pull = handlers_mt.__call -- the real computer.pullSignal + while true do + local key, value = next(handlers) + if not key then break end + root_data.handlers[key] = value + handlers[key] = nil + end + handlers_mt.__index = function(_, key) + return process.info().data.handlers[key] + end + handlers_mt.__newindex = function(_, key, value) + 
process.info().data.handlers[key] = value + end + handlers_mt.__pairs = function(_, ...) + return pairs(process.info().data.handlers, ...) + end + handlers_mt.__call = function(tbl, ...) + return process.info().data.pull(tbl, ...) + end + end +end + local function waitForDeath(threads, timeout, all) checkArg(1, threads, "table") checkArg(2, timeout, "number", "nil") @@ -15,7 +55,7 @@ local function waitForDeath(threads, timeout, all) local deadline = computer.uptime() + timeout while deadline > computer.uptime() do local dieing = {} - local living = {} + local living = false for _,t in ipairs(threads) do local result = t.process and t.process.data.result local proc_ok = type(result) ~= "table" or result[1] @@ -25,18 +65,18 @@ local function waitForDeath(threads, timeout, all) dieing[#dieing + 1] = t mortician[t] = true else - living[#living + 1] = t + living = true end end - if all and #living == 0 or not all and #dieing > 0 then + if all and not living or not all and #dieing > 0 then timed_out = false break end -- resume each non dead thread -- we KNOW all threads are event.pull blocked - event.pull() + event.pull(deadline - computer.uptime()) end for t in pairs(mortician) do @@ -58,8 +98,7 @@ function thread.waitForAll(threads, timeout) end local box_thread = {} -local box_thread_handle = {} -box_thread_handle.close = thread.waitForAll +local box_thread_handle = {close = thread.waitForAll} local function get_box_thread_handle(handles, bCreate) for _,next_handle in ipairs(handles) do @@ -94,7 +133,6 @@ function box_thread:status() end function box_thread:join(timeout) - self:detach() return box_thread_handle.close({self}, timeout) end @@ -118,6 +156,7 @@ function box_thread:detach() end for i,h in ipairs(btHandle) do if h == self then + self.process = nil return table.remove(btHandle, i) end end @@ -144,8 +183,7 @@ function thread.create(fp, ...) local t = setmetatable({}, {__status="suspended",__index=box_thread}) t.pco = pipes.createCoroutineStack(function(...) 
- local mt = getmetatable(t) - mt.__status = "running" + getmetatable(t).__status = "running" local fp_co = t.pco.create(fp) -- run fp_co until dead -- pullSignal will yield_all past this point @@ -154,45 +192,43 @@ function thread.create(fp, ...) while true do local result = table.pack(t.pco.resume(fp_co, table.unpack(args, 1, args.n))) if t.pco.status(fp_co) == "dead" then + -- this error handling is VERY much like process.lua + -- maybe one day it'll merge if not result[1] then - event.onError(string.format("thread crashed: %s", tostring(result[2]))) + local exit_code + local msg = result[2] + -- msg can be a custom error object + local reason = "crashed" + if type(msg) == "table" then + if type(msg.reason) == "string" then + reason = msg.reason + end + exit_code = tonumber(msg.code) + elseif type(msg) == "string" then + reason = msg + end + if not exit_code then + pcall(event.onError, string.format("[thread] %s", reason)) + exit_code = 1 + end + os.exit(exit_code) end break end args = table.pack(event.pull(table.unpack(result, 2, result.n))) end - mt.__status = "dead" - event.push("thread_exit") - t:detach() end) - local handlers = event.handlers - local handlers_mt = getmetatable(handlers) - -- the event library sets a metatable on handlers - -- but not a pull field - if not handlers_mt.pull then - -- if we don't separate root handlers from thread handlers we see double dispatch - -- because the thread calls dispatch on pull as well - handlers_mt.handlers = {} -- root handlers - handlers_mt.pull = handlers_mt.__call -- the real computer.pullSignal - handlers_mt.current = function(field) return process.info().data[field] or handlers_mt[field] end - while true do - local key, value = next(handlers) - if not key then break end - handlers_mt.handlers[key] = value - handlers[key] = nil - end - handlers_mt.__index = function(_, key) - return handlers_mt.current("handlers")[key] - end - handlers_mt.__newindex = function(_, key, value) - 
handlers_mt.current("handlers")[key] = value - end - handlers_mt.__pairs = function(_, ...) - return pairs(handlers_mt.current("handlers"), ...) - end - handlers_mt.__call = function(tbl, ...) - return handlers_mt.current("pull")(tbl, ...) + + --special resume to keep track of process death + local function private_resume(...) + local result = table.pack(t.pco.resume_all(...)) + if #t.pco.stack == 0 then + t:detach() + local mt = getmetatable(t) + mt.__status = "dead" + event.push("thread_exit") end + return table.unpack(result, 1, result.n) end local data = process.info(t.pco.stack[1]).data @@ -201,10 +237,10 @@ function thread.create(fp, ...) -- register a timeout handler -- hack so that event.register sees the root handlers local data_handlers = data.handlers - data.handlers = handlers_mt.handlers + data.handlers = process.info(2).data.handlers event.register( nil, -- nil key matches anything, timers use false keys - t.pco.resume_all, + private_resume, timeout, -- wait for the time specified by the caller 1) -- we only want this thread to wake up once data.handlers = data_handlers @@ -215,12 +251,12 @@ function thread.create(fp, ...) repeat event_data = table.pack(t.pco.yield_all(timeout)) -- during sleep, we may have been suspended - until getmetatable(t).__status ~= "suspended" + until t:status() ~= "suspended" return table.unpack(event_data, 1, event_data.n) end t:attach() - t.pco.resume_all(...) -- threads start out running + private_resume(...) 
-- threads start out running return t end diff --git a/src/main/resources/assets/opencomputers/loot/openos/usr/misc/greetings.txt b/src/main/resources/assets/opencomputers/loot/openos/usr/misc/greetings.txt index 17467d51d..1ef0905a9 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/usr/misc/greetings.txt +++ b/src/main/resources/assets/opencomputers/loot/openos/usr/misc/greetings.txt @@ -22,7 +22,7 @@ Many component methods have a short documentation - use `=component.componentNam You can get a list of all attached components using the `components` program. If you encounter out of memory errors, throw more RAM at your computer. Have you tried turning it off and on again? -To disable this greeting, install OpenOS to a writeable medium and delete `/etc/motd`. +To disable this greeting, install OpenOS to a writeable medium and remove the `/etc/motd` line from `/etc/profile`. Did you know OpenComputers has a forum? No? Well, it's at https://oc.cil.li/. Please report bugs on the Github issue tracker, thank you! Beware of cycles when building networks, or you may get duplicate messages! From 5fdee17fcd0c40022b6a00c70efdbcdcd0753a42 Mon Sep 17 00:00:00 2001 From: payonel Date: Sun, 18 Jun 2017 00:02:08 -0700 Subject: [PATCH 5/7] openos 1.6.5 update fixes and optimizations, one api removed process: process.running has been deprecated for over 2 years and is now obsolete, please use process.info process: added process.internal.run designed to run a coroutine to completion 01_process: cleaner coroutine_handler intercept code event: removed unnecessary local function and added an optional handlers param to register to simplify thread code io: update popen to use new /lib/pipe.lua library pipe: new /lib/pipe replace /lib/pipes > superior coroutine stack code, can reparent stacks > handles all pipe work, sh calls /lib/pipe now for pipes sh: now uses /lib/pipe to build pipe chains, calls shell.run to run a pipe chain. 
thread: greatly improved reliability of embedded and detached threads boot: removed dead code --- .../loot/openos/boot/01_process.lua | 24 +- .../opencomputers/loot/openos/lib/event.lua | 22 +- .../opencomputers/loot/openos/lib/io.lua | 2 +- .../opencomputers/loot/openos/lib/pipe.lua | 207 ++++++++++ .../opencomputers/loot/openos/lib/pipes.lua | 372 ------------------ .../opencomputers/loot/openos/lib/process.lua | 23 +- .../opencomputers/loot/openos/lib/sh.lua | 21 +- .../opencomputers/loot/openos/lib/shell.lua | 13 +- .../opencomputers/loot/openos/lib/thread.lua | 214 +++++----- .../loot/openos/opt/core/boot.lua | 3 +- .../loot/openos/opt/core/full_sh.lua | 91 ----- 11 files changed, 363 insertions(+), 629 deletions(-) create mode 100644 src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua delete mode 100644 src/main/resources/assets/opencomputers/loot/openos/lib/pipes.lua diff --git a/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua b/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua index b514508bd..4c92f6574 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua @@ -3,25 +3,11 @@ local process = require("process") --Initialize coroutine library-- local _coroutine = coroutine -- real coroutine backend -_G.coroutine = {} -package.loaded.coroutine = _G.coroutine +_G.coroutine = setmetatable({}, {__index = function(_, key) + return assert(process.info(_coroutine.running()), "thread has no proc").data.coroutine_handler[key] +end}) -for key,value in pairs(_coroutine) do - if type(value) == "function" and value ~= "running" and value ~= "create" then - _G.coroutine[key] = function(...) - local thread = _coroutine.running() - local info = process.info(thread) - -- note the gc thread does not have a process info - assert(info,"process not found for " .. 
tostring(thread)) - local data = info.data - local co = data.coroutine_handler - local handler = co[key] - return handler(...) - end - else - _G.coroutine[key] = value - end -end +package.loaded.coroutine = _G.coroutine local kernel_load = _G.load local intercept_load @@ -72,7 +58,7 @@ process.list[init_thread] = { { vars={}, io={}, --init will populate this - coroutine_handler=setmetatable({}, {__index=_coroutine}) + coroutine_handler = _coroutine }, instances = setmetatable({}, {__mode="v"}) } diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/event.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/event.lua index e46040654..5e59e6e72 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/event.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/event.lua @@ -7,7 +7,7 @@ local lastInterrupt = -math.huge event.handlers = handlers -function event.register(key, callback, interval, times) +function event.register(key, callback, interval, times, opt_handlers) local handler = { key = key, @@ -17,26 +17,17 @@ function event.register(key, callback, interval, times) } handler.timeout = computer.uptime() + handler.interval + opt_handlers = opt_handlers or handlers local id = 0 repeat id = id + 1 - until not handlers[id] + until not opt_handlers[id] - handlers[id] = handler + opt_handlers[id] = handler return id end -local function time_to_nearest() - local timeout = math.huge - for _,handler in pairs(handlers) do - if timeout > handler.timeout then - timeout = handler.timeout - end - end - return timeout -end - local _pullSignal = computer.pullSignal setmetatable(handlers, {__call=function(_,...)return _pullSignal(...)end}) computer.pullSignal = function(...) -- dispatch @@ -202,7 +193,10 @@ function event.pullFiltered(...) 
local deadline = seconds and (computer.uptime() + seconds) or math.huge repeat - local closest = math.min(deadline, time_to_nearest()) + local closest = deadline + for _,handler in pairs(handlers) do + closest = math.min(handler.timeout, closest) + end local signal = table.pack(computer.pullSignal(closest - computer.uptime())) if signal.n > 0 then if not (seconds or filter) or filter == nil or filter(table.unpack(signal, 1, signal.n)) then diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/io.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/io.lua index b0d33946c..400761a3a 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/io.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/io.lua @@ -77,7 +77,7 @@ function io.error(file) end function io.popen(prog, mode, env) - return require("pipes").popen(prog, mode, env) + return require("pipe").popen(prog, mode, env) end function io.read(...) diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua new file mode 100644 index 000000000..9b8cb5f06 --- /dev/null +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua @@ -0,0 +1,207 @@ +local process = require("process") +local shell = require("shell") +local buffer = require("buffer") +local command_result_as_code = require("sh").internal.command_result_as_code + +local pipe = {} + +-- root can be a coroutine or a function +function pipe.createCoroutineStack(root, env, name) + checkArg(1, root, "thread", "function") + + if type(root) == "function" then + root = assert(process.load(root, env, nil, name or "pipe"), "failed to load proc data for given function") + end + + local proc = assert(process.info(root), "failed to load proc data for given coroutine") + local _co = proc.data.coroutine_handler + + local pco = setmetatable({root=root}, {__index=_co}) + proc.data.coroutine_handler = pco + + function 
pco.yield(...) + return _co.yield(nil, ...) + end + function pco.yield_all(...) + return _co.yield(true, ...) + end + function pco.resume(co, ...) + checkArg(1, co, "thread") + local args = table.pack(...) + while true do -- for consecutive sysyields + local result = table.pack(_co.resume(co, table.unpack(args, 1, args.n))) + if result[1] then -- success: (true, sysval?, ...?) + if _co.status(co) == "dead" or pco.root == co then -- return: (true, ...) + return true, table.unpack(result, 2, result.n) + elseif result[2] ~= nil then -- yield: (true, sysval) + args = table.pack(_co.yield(table.unpack(result, 2, result.n))) + else -- yield: (true, nil, ...) + return true, table.unpack(result, 3, result.n) + end + else -- error: result = (false, string) + return false, result[2] + end + end + end + return pco +end + +local pipe_stream = +{ + close = function(self) + self.closed = true + if coroutine.status(self.next) == "suspended" then + coroutine.resume(self.next) + end + self.redirect = {} + end, + seek = function() + return nil, "bad file descriptor" + end, + write = function(self, value) + if not self.redirect[1] and self.closed then + -- if next is dead, ignore all writes + if coroutine.status(self.next) ~= "dead" then + io.stderr:write("attempt to use a closed stream\n") + os.exit(1) + end + elseif self.redirect[1] then + return self.redirect[1]:write(value) + elseif not self.closed then + self.buffer = self.buffer .. value + local result = table.pack(coroutine.resume(self.next)) + if coroutine.status(self.next) == "dead" then + self:close() + -- always cause os.exit when the pipe closes + -- this is very important + -- e.g. 
cat very_large_file | head + -- when head is done, cat should stop + result[1] = nil + end + -- the next pipe + if not result[1] then + os.exit(command_result_as_code(result[2])) + end + return self + end + os.exit(0) -- abort the current process: SIGPIPE + end, + read = function(self, n) + if self.closed then + return nil -- eof + end + if self.redirect[0] then + -- popen could be using this code path + -- if that is the case, it is important to leave stream.buffer alone + return self.redirect[0]:read(n) + elseif self.buffer == "" then + coroutine.yield() + end + local result = string.sub(self.buffer, 1, n) + self.buffer = string.sub(self.buffer, n + 1) + return result + end +} + +-- prog1 | prog2 | ... | progn +function pipe.buildPipeChain(progs) + local chain = {} + local prev_piped_stream + for i=1,#progs do + local prog = progs[i] + local thread = type(prog) == "thread" and prog or pipe.createCoroutineStack(prog).root + chain[i] = thread + local data = process.info(thread).data + local pio = data.io + + local piped_stream + if i < #progs then + local handle = setmetatable({redirect = {rawget(pio, 1)},buffer = ""}, {__index = pipe_stream}) + piped_stream = buffer.new("rw", handle) + piped_stream:setvbuf("no", 1024) + -- buffer close flushes the buffer, but we have no buffer + -- also, when the buffer is closed, reads and writes don't pass through + -- simply put, we don't want buffer:close + piped_stream.close = function(self) self.stream:close() end + pio[1] = piped_stream + table.insert(data.handles, piped_stream) + end + + if prev_piped_stream then + prev_piped_stream.stream.redirect[0] = rawget(pio, 0) + prev_piped_stream.stream.next = thread + pio[0] = prev_piped_stream + end + + prev_piped_stream = piped_stream + end + + return chain +end + +local chain_stream = +{ + read = function(self, value) + -- handler is currently on yield_all [else we wouldn't have control here] + local stack_ok, read_ok, ret = self.pco.resume(self.pco.root, value) + return 
select(stack_ok and read_ok and 2 or 1, nil, ret) + end, + write = function(self, ...) + return self:read(table.concat({...})) + end, + close = function(self) + self.io_stream:close() + end, +} + +function pipe.popen(prog, mode, env) + mode = mode or "r" + if mode ~= "r" and mode ~= "w" then + return nil, "bad argument #2: invalid mode " .. tostring(mode) .. " must be r or w" + end + + local r = mode == "r" + local key = r and "read" or "write" + + -- to simplify the code - shell.execute is run within a function to pass (prog, env) + -- if cmd_proc where to come second (mode=="w") then the pipe_proc would have to pass + -- the starting args. which is possible, just more complicated + local cmd_proc = process.load(function() return shell.execute(prog, env) end, nil, nil, prog) + + -- the chain stream is the popen controller + local stream = setmetatable({}, { __index = chain_stream }) + + -- the stream needs its own process for io + local pipe_proc = process.load(function() + local n = r and 0 or "" + while true do + n = stream.pco.yield_all(stream.io_stream[key](stream.io_stream, n)) + end + end, nil, nil, "pipe_handler") + + local pipe_index = r and 2 or 1 + local cmd_index = r and 1 or 2 + local chain = {[cmd_index]=cmd_proc, [pipe_index]=pipe_proc} + + -- upgrade coroutine stack + local cmd_stack = pipe.createCoroutineStack(chain[1]) + + -- the processes need to share the coroutine handler to yield the cmd_stack + process.info(chain[2]).data.coroutine_handler = cmd_stack + + -- link the cmd and pipe proc io + pipe.buildPipeChain(chain) + + -- store handle to io_stream from easy access later + stream.io_stream = process.info(cmd_stack.root).data.io[1].stream + stream.pco = cmd_stack + + -- popen commands start out running, like threads + cmd_stack.resume(cmd_stack.root) + + local buffered_stream = buffer.new(mode, stream) + buffered_stream:setvbuf("no", 1024) + return buffered_stream +end + +return pipe diff --git 
a/src/main/resources/assets/opencomputers/loot/openos/lib/pipes.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/pipes.lua deleted file mode 100644 index 4606b94a9..000000000 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/pipes.lua +++ /dev/null @@ -1,372 +0,0 @@ -local tx = require("transforms") -local shell = require("shell") -local sh = require("sh") -local process = require("process") - -local pipes = {} - -local pipeStream = {} -local function bfd() return nil, "bad file descriptor" end -function pipeStream.new(pm) - local stream = {pm=pm} - local metatable = {__index = pipeStream} - return setmetatable(stream, metatable) -end -function pipeStream:resume() - local yield_args = table.pack(self.pm.pco.resume_all()) - if not yield_args[1] then - self.pm.dead = true - - if not yield_args[1] and yield_args[2] then - io.stderr:write(tostring(yield_args[2]) .. "\n") - end - end - return table.unpack(yield_args) -end -function pipeStream:close() - if self.pm.closed then -- already closed - return - end - - self.pm.closed = true - - -- if our pco stack is empty, we've already run fully - if self.pm.pco.top() == nil then - return - end - - -- if a thread aborted we have set dead true - if self.pm.dead then - return - end - - -- run prog until dead - local co = self.pm.pco.previous_handler - local pco_root = self.pm.threads[1] - if co.status(pco_root) == "dead" then - -- I would have liked the pco stack to unwind itself for dead coroutines - -- maybe I haven't handled aborts correctly - return - end - - return self:resume(true) -end -function pipeStream:read(n) - local pm = self.pm - - if pm.closed then - return bfd() - end - - if pm:buffer() == '' and not pm.dead then - local result = table.pack(self:resume()) - if not result[1] then - -- resume can fail if p1 crashes - self:close() - return nil, "pipe closed unexpectedly" - elseif result.n > 1 and not result[2] then - return result[2], result[3] - end - end - - local result = pm:buffer(n) - 
if result == '' and pm.dead and n > 0 then - return nil -- eof - end - - return result -end -pipeStream.seek = bfd -function pipeStream:write(v) - local pm = self.pm - if pm.closed or pm.dead then - -- if prog is dead, ignore all writes - if pm.pco.previous_handler.status(pm.threads[pm.self_id]) ~= "dead" then - error("attempt to use a closed stream") - end - return bfd() - end - - pm:buffer(pm:buffer() .. v) - - -- allow handler to push write event - local result = table.pack(self:resume()) - if not result[1] then - -- resume can fail if p1 crashes - pm.dead = true - self:close() - return nil, "pipe closed unexpectedly" - end - - return self -end - -function pipes.createCoroutineStack(fp, init, name) - local _co = process.info().data.coroutine_handler - - local pco = setmetatable( - { - stack = {}, - next = false, - create = _co.create, - wrap = _co.wrap, - previous_handler = _co - }, {__index=_co}) - - function pco.top() - return pco.stack[#pco.stack] - end - function pco.yield(...) - -- pop last - pco.set_unwind(pco.running()) - return _co.yield(...) - end - function pco.index_of(thread) - for i,t in ipairs(pco.stack) do - if t == thread then - return i - end - end - end - function pco.yield_all(...) - local current = pco.running() - local existing_index = pco.index_of(current) - assert(existing_index, "cannot yield inactive stack") - pco.next = current - return _co.yield(...) - end - function pco.set_unwind(from) - pco.next = false - if from then - local index = pco.index_of(from) - if index then - pco.stack = tx.sub(pco.stack, 1, index-1) - pco.next = pco.stack[index-1] or false - end - end - end - function pco.resume_all(...) - local base = pco.stack[1] - local top = pco.top() - if type(base) ~= "thread" or _co.status(base) ~= "suspended" or - type(top) ~= "thread" or _co.status(top) ~= "suspended" then - return false - end - - local status, result = pcall(function(...) - local _result = table.pack(pco.resume(top, ...)) - return _result - end,...) 
- - if not status then - return nil, result - end - - return table.unpack(result) - end - function pco.resume(thread, ...) - checkArg(1, thread, "thread") - local status = pco.status(thread) - if status ~= "suspended" then - local msg = string.format("cannot resume %s coroutine", - status == "dead" and "dead" or "non-suspended") - return false, msg - end - - local current_index = pco.index_of(pco.running()) - local existing_index = pco.index_of(thread) - - if not existing_index then - assert(current_index, "pco coroutines cannot resume threads outside the stack") - pco.stack = tx.concat(tx.sub(pco.stack, 1, current_index), {thread}) - end - - if current_index then - -- current should be waiting for yield - pco.next = thread - local t = table.pack(_co.yield(...)) -- pass args to resume next - return pco.last == nil and true or pco.last, table.unpack(t,1,t.n) - else - -- the stack is not running - pco.next = false - local yield_args = table.pack(_co.resume(thread, ...)) - if #pco.stack > 0 then - -- thread may have crashed (crash unwinds as well) - -- or we don't have next lined up (unwind) - if not pco.next or not yield_args[1] then - -- unwind from current index, not top - pco.set_unwind(thread) - end - - -- if next is current thread, yield_all is active - -- in such a case, yield out first, then resume where we left off - if pco.next and pco.next ~= thread then - local next = pco.next - pco.next = false - pco.last = yield_args[1] - return pco.resume(next, table.unpack(yield_args,2,yield_args.n)) - end - end - - return table.unpack(yield_args) - end - end - function pco.status(thread) - checkArg(1, thread, "thread") - - local current_index = pco.index_of(pco.running()) - local existing_index = pco.index_of(thread) - - if current_index and existing_index and existing_index < current_index then - local current = pco.stack[current_index] - if current and _co.status(current) == "running" then - return "normal" - end - end - - return _co.status(thread) - end - - if fp 
then - pco.stack = {process.load(fp,nil,init,name or "pco root")} - process.info(pco.stack[1]).data.coroutine_handler = pco - end - - return pco -end - -local pipeManager = {} -function pipeManager.reader(pm,...) - while pm.pco.status(pm.threads[pm.prog_id]) ~= "dead" do - pm.pco.yield_all() - - -- kick back to main thread, true to kick back one further - if pm.closed then break end - - -- if we are a reader pipe, we leave the buffer alone and yield to previous - if pm.pco.status(pm.threads[pm.prog_id]) ~= "dead" then - pm.pco.yield(...) - end - end - pm.dead = true -end - -function pipeManager:buffer(value) - -- if value but no stream, buffer for buffer - - local s = self and self.pipe and self.pipe.stream - if not s then - if type(value) == "string" or self.prewrite then - self.prewrite = self.prewrite or {} - s = self.prewrite -- s.buffer will be self.prewrite.buffer - else - return '' - end - elseif self.prewrite then -- we stored, previously, a prewrite buffer - s.buffer = self.prewrite.buffer .. s.buffer - self.prewrite = nil - end - - if type(value) == "string" then - s.buffer = value - return value - elseif type(value) ~= "number" then - return s.buffer -- don't truncate - end - - local result = string.sub(s.buffer, 1, value) - s.buffer = string.sub(s.buffer, value + 1) - return result -end - -function pipeManager:redirectRead() - local reader = {pm=self} - function reader:read(n) - local pm = self.pm - local pco = pm.pco - -- if we have any buffer, return it first - - if pm:buffer() == '' and not pm.closed and not pm.dead then - pco.yield_all() - end - - if pm.closed or pm.dead then - return nil - end - - return pm:buffer(n) - end - - return reader -end - -function pipes.createPipe(prog, mode, env) - mode = mode or "r" - if mode ~= "r" and mode ~= "w" then - return nil, "bad argument #2: invalid mode " .. tostring(mode) .. 
" must be r or w" - end - - local shellPath, reason = shell.resolve(os.getenv("SHELL") or "/bin/sh", "lua") - if not shellPath then - return nil, reason - end - - local pm = setmetatable( - {dead=false,closed=false,prog=prog,mode=mode,env=env}, - {__index=pipeManager} - ) - pm.prog_id = pm.mode == "r" and 1 or 2 - pm.self_id = pm.mode == "r" and 2 or 1 - pm.handler = pm.mode == "r" and - function()return pipeManager.reader(pm)end or - function()pm.dead=true end - - pm.commands = {} - pm.commands[pm.prog_id] = {shellPath, {}} - pm.commands[pm.self_id] = {pm.handler, {}} - - pm.root = function() - local reason - pm.threads, reason = sh.internal.createThreads(pm.commands, pm.env, {[pm.prog_id]=table.pack(pm.env,pm.prog)}) - - if not pm.threads then - pm.dead = true - return false, reason - end - - pm.pipe = process.info(pm.threads[1]).data.io[1] - - -- if we are the writer, we need args to resume prog - if pm.mode == "w" then - pm.pipe.stream.redirect[0] = pm:redirectRead() - end - - return sh.internal.runThreads(pm.threads) - end - - pm.pco = pipes.createCoroutineStack(pm.root) - return pipeStream.new(pm) -end - -function pipes.popen(prog, mode, env) - checkArg(1, prog, "string") - checkArg(2, mode, "string", "nil") - checkArg(3, env, "table", "nil") - - local pipe, reason = pipes.createPipe(prog, mode, env) - - if not pipe then - return false, reason - end - - -- pipe file descriptor - local pfd = require("buffer").new(mode, pipe) - pfd:setvbuf("no", 0) -- 2nd are to read chunk size - - -- popen processes start on create (like a thread) - pfd.stream:resume() - - return pfd -end - -return pipes diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua index 8b064097f..169fd73bf 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/process.lua @@ -104,14 +104,8 @@ function 
process.load(path, env, init, name) return thread end -function process.running(level) -- kept for backwards compat, prefer process.info - local info = process.info(level) - if info then - return info.path, info.env, info.command - end -end - function process.info(levelOrThread) + checkArg(1, levelOrThread, "thread", "number", "nil") local p if type(levelOrThread) == "thread" then p = process.findProcess(levelOrThread) @@ -141,4 +135,19 @@ function process.internal.close(thread, result) process.list[thread] = nil end +function process.internal.continue(co, ...) + local result = {} + -- Emulate CC behavior by making yields a filtered event.pull() + local args = table.pack(...) + while coroutine.status(co) ~= "dead" do + result = table.pack(coroutine.resume(co, table.unpack(args, 1, args.n))) + if coroutine.status(co) ~= "dead" then + args = table.pack(coroutine.yield(table.unpack(result, 2, result.n))) + elseif not result[1] then + io.stderr:write(result[2]) + end + end + return table.unpack(result, 2, result.n) +end + return process diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/sh.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/sh.lua index 84cc77715..7ff6e33d4 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/sh.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/sh.lua @@ -180,29 +180,12 @@ function sh.internal.createThreads(commands, env, start_args) end if #threads > 1 then - sh.internal.buildPipeChain(threads) + require("pipe").buildPipeChain(threads) end return threads end -function sh.internal.runThreads(threads) - local result = {} - for i = 1, #threads do - -- Emulate CC behavior by making yields a filtered event.pull() - local thread, args = threads[i], {} - while coroutine.status(thread) ~= "dead" do - result = table.pack(coroutine.resume(thread, table.unpack(args))) - if coroutine.status(thread) ~= "dead" then - args = table.pack(coroutine.yield(table.unpack(result, 2, result.n))) - 
elseif not result[1] then - io.stderr:write(result[2]) - end - end - end - return result[2] -end - function sh.internal.executePipes(pipe_parts, eargs, env) local commands = {} for _,words in ipairs(pipe_parts) do @@ -260,7 +243,7 @@ function sh.internal.executePipes(pipe_parts, eargs, env) local threads, reason = sh.internal.createThreads(commands, env, {[#commands]=eargs}) if not threads then return false, reason end - return sh.internal.runThreads(threads) + return process.internal.continue(threads[1]) end function sh.execute(env, command, ...) diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/shell.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/shell.lua index feac9acff..ea536a71a 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/shell.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/shell.lua @@ -124,16 +124,9 @@ function shell.execute(command, env, ...) if not sh then return false, reason end - local result = table.pack(coroutine.resume(process.load(function(...) - return sh(...) 
- end), env, command, ...)) - if not result[1] and type(result[2]) == "table" and result[2].reason == "terminated" then - if result[2].code then - return true - else - return false, "terminated" - end - end + local proc = process.load(sh, nil, nil, command) + local result = table.pack(process.internal.continue(proc, env, command, ...)) + if result.n == 0 then return true end return table.unpack(result, 1, result.n) end diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua index f3234a6af..f602e7b77 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/thread.lua @@ -1,49 +1,10 @@ -local pipes = require("pipes") +local pipe = require("pipe") local event = require("event") local process = require("process") local computer = require("computer") local thread = {} - -do - local handlers = event.handlers - local handlers_mt = getmetatable(handlers) - -- the event library sets a metatable on handlers, but we set threaded=true - if not handlers_mt.threaded then - -- find the root process - local root_data - for _,p in pairs(process.list) do - if not p.parent then - root_data = p.data - break - end - end - assert(root_data, "thread library panic: no root proc") - handlers_mt.threaded = true - -- if we don't separate root handlers from thread handlers we see double dispatch - -- because the thread calls dispatch on pull as well - root_data.handlers = {} -- root handlers - root_data.pull = handlers_mt.__call -- the real computer.pullSignal - while true do - local key, value = next(handlers) - if not key then break end - root_data.handlers[key] = value - handlers[key] = nil - end - handlers_mt.__index = function(_, key) - return process.info().data.handlers[key] - end - handlers_mt.__newindex = function(_, key, value) - process.info().data.handlers[key] = value - end - handlers_mt.__pairs = 
function(_, ...) - return pairs(process.info().data.handlers, ...) - end - handlers_mt.__call = function(tbl, ...) - return process.info().data.pull(tbl, ...) - end - end -end +local init_thread local function waitForDeath(threads, timeout, all) checkArg(1, threads, "table") @@ -57,7 +18,8 @@ local function waitForDeath(threads, timeout, all) local dieing = {} local living = false for _,t in ipairs(threads) do - local result = t.process and t.process.data.result + local mt = getmetatable(t) + local result = mt.attached.data.result local proc_ok = type(result) ~= "table" or result[1] local ready_to_die = t:status() ~= "running" -- suspended is considered dead to exit or not proc_ok -- the thread is killed if its attached process has a non zero exit @@ -133,57 +95,62 @@ function box_thread:status() end function box_thread:join(timeout) - return box_thread_handle.close({self}, timeout) + return waitForDeath({self}, timeout, true) end function box_thread:kill() - self:detach() - if self:status() == "dead" then - return - end - getmetatable(self).__status = "dead" - self.pco.stack = {} + getmetatable(self).close() end function box_thread:detach() - if not self.process then - return - end - local handles = self.process.data.handles - local btHandle = get_box_thread_handle(handles) - if not btHandle then - return nil, "thread failed to detach, process had no thread handle" - end - for i,h in ipairs(btHandle) do - if h == self then - self.process = nil - return table.remove(btHandle, i) - end - end - return nil, "thread not found in parent process" + return self:attach(init_thread) end function box_thread:attach(parent) checkArg(1, parent, "thread", "number", "nil") + local mt = assert(getmetatable(self), "thread panic: no metadata") local proc = process.info(parent) if not proc then return nil, "thread failed to attach, process not found" end - self:detach() + if mt.attached == proc then return self end -- already attached + + local waiting_handler + if mt.attached then + 
local prev_btHandle = assert(get_box_thread_handle(mt.attached.data.handles), "thread panic: no thread handle") + for i,h in ipairs(prev_btHandle) do + if h == self then + table.remove(prev_btHandle, i) + if mt.id then + waiting_handler = assert(mt.attached.data.handlers[mt.id], "thread panic: no event handler") + mt.attached.data.handlers[mt.id] = nil + end + break + end + end + end + -- attach to parent or the current process - self.process = proc - local handles = self.process.data.handles + mt.attached = proc + local handles = proc.data.handles -- this process may not have a box_thread manager handle local btHandle = get_box_thread_handle(handles, true) table.insert(btHandle, self) - return true + + if waiting_handler then -- event-waiting + mt.register(waiting_handler.timeout - computer.uptime()) + end + + return self end function thread.create(fp, ...) checkArg(1, fp, "function") - local t = setmetatable({}, {__status="suspended",__index=box_thread}) - t.pco = pipes.createCoroutineStack(function(...) - getmetatable(t).__status = "running" + local t = {} + local mt = {__status="suspended",__index=box_thread} + setmetatable(t, mt) + t.pco = pipe.createCoroutineStack(function(...) + mt.__status = "running" local fp_co = t.pco.create(fp) -- run fp_co until dead -- pullSignal will yield_all past this point @@ -217,34 +184,35 @@ function thread.create(fp, ...) end args = table.pack(event.pull(table.unpack(result, 2, result.n))) end - end) + end, nil, "thread") --special resume to keep track of process death - local function private_resume(...) - local result = table.pack(t.pco.resume_all(...)) - if #t.pco.stack == 0 then - t:detach() - local mt = getmetatable(t) - mt.__status = "dead" - event.push("thread_exit") + function mt.private_resume(...) 
+ mt.id = nil + -- this thread may have been killed + if t:status() == "dead" then return end + local result = table.pack(t.pco.resume(t.pco.root, ...)) + if t.pco.status(t.pco.root) == "dead" then + mt.close() end return table.unpack(result, 1, result.n) end - local data = process.info(t.pco.stack[1]).data - data.handlers = {} - data.pull = function(_, timeout) - -- register a timeout handler - -- hack so that event.register sees the root handlers - local data_handlers = data.handlers - data.handlers = process.info(2).data.handlers - event.register( - nil, -- nil key matches anything, timers use false keys - private_resume, - timeout, -- wait for the time specified by the caller - 1) -- we only want this thread to wake up once - data.handlers = data_handlers + mt.process = process.list[t.pco.root] + mt.process.data.handlers = {} + function mt.register(timeout) + -- register a timeout handler + mt.id = event.register( + nil, -- nil key matches anything, timers use false keys + mt.private_resume, + timeout, -- wait for the time specified by the caller + 1, -- we only want this thread to wake up once + mt.attached.data.handlers) -- optional arg, to specify our own handlers + end + + function mt.process.data.pull(_, timeout) + mt.register(timeout) -- yield_all will yield this pco stack -- the callback will resume this stack local event_data @@ -255,10 +223,68 @@ function thread.create(fp, ...) return table.unpack(event_data, 1, event_data.n) end - t:attach() - private_resume(...) -- threads start out running + function mt.close() + if t:status() == "dead" then + return + end + local htm = get_box_thread_handle(mt.attached.data.handles) + for _,ht in ipairs(htm) do + if ht == t then + table.remove(htm, _) + break + end + end + mt.__status = "dead" + event.push("thread_exit") + end + + t:attach() -- the current process + mt.private_resume(...) 
-- threads start out running return t end +do + local handlers = event.handlers + local handlers_mt = getmetatable(handlers) + -- the event library sets a metatable on handlers, but we set threaded=true + if not handlers_mt.threaded then + -- find the root process + local root_data + for t,p in pairs(process.list) do + if not p.parent then + init_thread = t + root_data = p.data + break + end + end + assert(init_thread, "thread library panic: no init thread") + handlers_mt.threaded = true + -- handles might be optimized out for memory + root_data.handles = root_data.handles or {} + -- if we don't separate root handlers from thread handlers we see double dispatch + -- because the thread calls dispatch on pull as well + root_data.handlers = {} -- root handlers + root_data.pull = handlers_mt.__call -- the real computer.pullSignal + while true do + local key, value = next(handlers) + if not key then break end + root_data.handlers[key] = value + handlers[key] = nil + end + handlers_mt.__index = function(_, key) + return process.info().data.handlers[key] + end + handlers_mt.__newindex = function(_, key, value) + process.info().data.handlers[key] = value + end + handlers_mt.__pairs = function(_, ...) + return pairs(process.info().data.handlers, ...) + end + handlers_mt.__call = function(tbl, ...) + return process.info().data.pull(tbl, ...) + end + end +end + return thread diff --git a/src/main/resources/assets/opencomputers/loot/openos/opt/core/boot.lua b/src/main/resources/assets/opencomputers/loot/openos/opt/core/boot.lua index aeecf5f43..7ad88d429 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/opt/core/boot.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/opt/core/boot.lua @@ -1,7 +1,7 @@ -- called from /init.lua local raw_loadfile = ... 
-_G._OSVERSION = "OpenOS 1.6.4" +_G._OSVERSION = "OpenOS 1.6.5" local component = component local computer = computer @@ -108,7 +108,6 @@ status("Initializing file system...") -- Mount the ROM and temporary file systems to allow working on the file -- system module from this point on. require("filesystem").mount(computer.getBootAddress(), "/") -package.preload={} status("Running boot scripts...") diff --git a/src/main/resources/assets/opencomputers/loot/openos/opt/core/full_sh.lua b/src/main/resources/assets/opencomputers/loot/openos/opt/core/full_sh.lua index cb7243325..5f29fb565 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/opt/core/full_sh.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/opt/core/full_sh.lua @@ -99,36 +99,6 @@ function sh.internal.openCommandRedirects(redirects) end end -function sh.internal.buildPipeChain(threads) - local prev_pipe - for i=1,#threads do - local thread = threads[i] - local data = process.info(thread).data - local pio = data.io - - local pipe - if i < #threads then - pipe = require("buffer").new("rw", sh.internal.newMemoryStream()) - pipe:setvbuf("no", 0) - -- buffer close flushes the buffer, but we have no buffer - -- also, when the buffer is closed, read and writes don't pass through - -- simply put, we don't want buffer:close - pipe.close = function(self) self.stream:close() end - pipe.stream.redirect[1] = rawget(pio, 1) - pio[1] = pipe - table.insert(data.handles, pipe) - end - - if prev_pipe then - prev_pipe.stream.redirect[0] = rawget(pio, 0) - prev_pipe.stream.next = thread - pio[0] = prev_pipe - end - - prev_pipe = pipe - end -end - -- takes an eword, returns a list of glob hits or {word} if no globs exist function sh.internal.glob(eword) -- words are parts, parts are txt and qr @@ -524,67 +494,6 @@ function sh.internal.remove_negation(chain) return false end -function sh.internal.newMemoryStream() - local memoryStream = {} - - function memoryStream:close() - self.closed = true - 
self.redirect = {} - end - - function memoryStream:seek() - return nil, "bad file descriptor" - end - - function memoryStream:read(n) - if self.closed then - return nil -- eof - end - if self.redirect[0] then - -- popen could be using this code path - -- if that is the case, it is important to leave stream.buffer alone - return self.redirect[0]:read(n) - elseif self.buffer == "" then - coroutine.yield() - end - local result = string.sub(self.buffer, 1, n) - self.buffer = string.sub(self.buffer, n + 1) - return result - end - - function memoryStream:write(value) - if not self.redirect[1] and self.closed then - -- if next is dead, ignore all writes - if coroutine.status(self.next) ~= "dead" then - io.stderr:write("attempt to use a closed stream\n") - os.exit(1) - end - elseif self.redirect[1] then - return self.redirect[1]:write(value) - elseif not self.closed then - self.buffer = self.buffer .. value - local result = table.pack(coroutine.resume(self.next)) - if coroutine.status(self.next) == "dead" then - self:close() - -- always cause os.exit when the pipe closes - result[1] = nil - end - -- the next pipe - if not result[1] then - os.exit(sh.internal.command_result_as_code(result[2])) - end - return self - end - os.exit(0) -- abort the current process: SIGPIPE - end - - local stream = {closed = false, buffer = "", - redirect = {}, result = {}} - local metatable = {__index = memoryStream, - __metatable = "memorystream"} - return setmetatable(stream, metatable) -end - function sh.internal.execute_complex(statements, eargs, env) for si=1,#statements do local s = statements[si] local chains = sh.internal.groupChains(s) From e684e588ef29d6bc9416963d704d8be8191441cf Mon Sep 17 00:00:00 2001 From: payonel Date: Wed, 21 Jun 2017 00:15:26 -0700 Subject: [PATCH 6/7] support yield through pipes --- .../opencomputers/loot/openos/bin/ps.lua | 150 ++++++++++++++++++ .../loot/openos/boot/01_process.lua | 15 +- .../opencomputers/loot/openos/lib/pipe.lua | 116 ++++++++------ 3 
files changed, 228 insertions(+), 53 deletions(-) create mode 100644 src/main/resources/assets/opencomputers/loot/openos/bin/ps.lua diff --git a/src/main/resources/assets/opencomputers/loot/openos/bin/ps.lua b/src/main/resources/assets/opencomputers/loot/openos/bin/ps.lua new file mode 100644 index 000000000..e47180519 --- /dev/null +++ b/src/main/resources/assets/opencomputers/loot/openos/bin/ps.lua @@ -0,0 +1,150 @@ +local process = require("process") +local unicode = require("unicode") +local event = require("event") +local event_mt = getmetatable(event.handlers) + +-- WARNING this code does not use official kernel API and is likely to change + +local data = {} +local widths = {} +local sorted = {} +local moved_indexes = {} + +local elbow = unicode.char(0x2514) + +local function thread_id(t,p) + if t then + return tostring(t):gsub("^thread: 0x", "") + end + -- find the parent thread + for k,v in pairs(process.list) do + if v == p then + return thread_id(k) + end + end + return "-" +end + +local cols = +{ + {"PID", thread_id}, + {"EVENTS", function(_,p) + local handlers = {} + if event_mt.threaded then + handlers = rawget(p.data, "handlers") or {} + elseif not p.parent then + handlers = event.handlers + end + local count = 0 + for _ in pairs(handlers) do + count = count + 1 + end + return count == 0 and "-" or tostring(count) + end}, + {"THREADS", function(_,p) + -- threads are handles with mt.close + local count = 0 + for _,h in pairs(p.data.handles or {}) do + local mt = getmetatable(h) + if mt and mt.__index and mt.__index.close then + count = count + #h + break -- there is only one thread handle manager + end + end + return count == 0 and "-" or tostring(count) + end}, + {"PARENT", function(_,p) + for _,process_info in pairs(process.list) do + for _,handle in pairs(process_info.data.handles or {}) do + local mt = getmetatable(handle) + if mt and mt.__index and mt.__index.close then + for _,ht in ipairs(handle) do + local ht_mt = getmetatable(ht) + if 
ht_mt.process == p then + return thread_id(nil,process_info) + end + end + break + end + end + end + return thread_id(nil,p.parent) + end}, + {"CMD", function(_,p) return p.command end}, +} + +local function add_field(key, value) + if not data[key] then data[key] = {} end + table.insert(data[key], value) + widths[key] = math.max(widths[key] or 0, #value) +end + +for _,key in ipairs(cols) do + add_field(key[1], key[1]) +end + +for thread_handle, process_info in pairs(process.list) do + for _,key in ipairs(cols) do + add_field(key[1], key[2](thread_handle, process_info)) + end +end + +local parent_index +for index,set in ipairs(cols) do + if set[1] == "PARENT" then + parent_index = index + break + end +end +assert(parent_index, "did not find a parent column") + +local function move_to_sorted(index) + if moved_indexes[index] then + return false + end + local entry = {} + for k,v in pairs(data) do + entry[k] = v[index] + end + sorted[#sorted + 1] = entry + moved_indexes[index] = true + return true +end + +local function make_elbow(depth) + return (" "):rep(depth - 1) .. (depth > 0 and elbow or "") +end + +-- remove COLUMN labels to simplify sort +move_to_sorted(1) + +local function update_family(parent, depth) + depth = depth or 0 + parent = parent or "-" + for index in ipairs(data.PID) do + local this_parent = data[cols[parent_index][1]][index] + if this_parent == parent then + local dash_cmd = make_elbow(depth) .. data.CMD[index] + data.CMD[index] = dash_cmd + widths.CMD = math.max(widths.CMD or 0, #dash_cmd) + if move_to_sorted(index) then + update_family(data.PID[index], depth + 1) + end + end + end +end + +update_family() +table.remove(cols, parent_index) -- don't show parent id + +for _,set in ipairs(sorted) do + local split = "" + for _,key in ipairs(cols) do + local label = key[1] + local format = split .. "%-" .. tostring(widths[label]) .. 
"s" + io.write(string.format(format, set[label])) + split = " " + end + print() +end + diff --git a/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua b/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua index 4c92f6574..9bab80513 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/boot/01_process.lua @@ -3,9 +3,18 @@ local process = require("process") --Initialize coroutine library-- local _coroutine = coroutine -- real coroutine backend -_G.coroutine = setmetatable({}, {__index = function(_, key) - return assert(process.info(_coroutine.running()), "thread has no proc").data.coroutine_handler[key] -end}) +_G.coroutine = setmetatable( + { + resume = function(co, ...) + return assert(process.info(co), "thread has no proc").data.coroutine_handler.resume(co, ...) + end + }, + { + __index = function(_, key) + return assert(process.info(_coroutine.running()), "thread has no proc").data.coroutine_handler[key] + end + } +) package.loaded.coroutine = _G.coroutine diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua index 9b8cb5f06..4f16688cd 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua @@ -4,6 +4,7 @@ local buffer = require("buffer") local command_result_as_code = require("sh").internal.command_result_as_code local pipe = {} +local _root_co = assert(process.info(), "process metadata failed to load").data.coroutine_handler -- root can be a coroutine or a function function pipe.createCoroutineStack(root, env, name) @@ -13,33 +14,28 @@ function pipe.createCoroutineStack(root, env, name) root = assert(process.load(root, env, nil, name or "pipe"), "failed to load proc data for given function") end - local proc = assert(process.info(root), "failed to load 
proc data for given coroutine") - local _co = proc.data.coroutine_handler + local proc = assert(process.list[root], "coroutine must be a process thread else the parent process is corrupted") - local pco = setmetatable({root=root}, {__index=_co}) + local pco = setmetatable({root=root}, {__index=_root_co}) proc.data.coroutine_handler = pco function pco.yield(...) - return _co.yield(nil, ...) + return _root_co.yield(nil, ...) end function pco.yield_all(...) - return _co.yield(true, ...) + return _root_co.yield(true, ...) end function pco.resume(co, ...) checkArg(1, co, "thread") local args = table.pack(...) while true do -- for consecutive sysyields - local result = table.pack(_co.resume(co, table.unpack(args, 1, args.n))) - if result[1] then -- success: (true, sysval?, ...?) - if _co.status(co) == "dead" or pco.root == co then -- return: (true, ...) - return true, table.unpack(result, 2, result.n) - elseif result[2] ~= nil then -- yield: (true, sysval) - args = table.pack(_co.yield(table.unpack(result, 2, result.n))) - else -- yield: (true, nil, ...) - return true, table.unpack(result, 3, result.n) - end - else -- error: result = (false, string) - return false, result[2] + local result = table.pack(_root_co.resume(co, table.unpack(args, 1, args.n))) + if not result[1] or _root_co.status(co) == "dead" then + return table.unpack(result, 1, result.n) + elseif result[2] and pco.root ~= co then + args = table.pack(_root_co.yield(table.unpack(result, 2, result.n))) + else + return true, table.unpack(result, 3, result.n) end end end @@ -48,10 +44,40 @@ end local pipe_stream = { + continue = function(self, exit) + local result = table.pack(coroutine.resume(self.next)) + while true do -- repeat resumes if B (A|B) makes a natural yield + -- if B crashed or closed in the last resume + -- then we can close the stream + if coroutine.status(self.next) == "dead" then + self:close() + -- always cause os.exit when the pipe closes + -- this is very important + -- e.g. 
cat very_large_file | head + -- when head is done, cat should stop + result[1] = nil + end + -- the pipe closed or crashed + if not result[1] then + if exit then + os.exit(command_result_as_code(result[2])) + end + return self + end + -- next is suspended, read_mode indicates why + if self.read_mode then + -- B wants A to write again, resume A + return self + end + -- not reading, it is requesting a yield + result = table.pack(coroutine.yield_all(table.unpack(result, 2, result.n))) + result = table.pack(coroutine.resume(self.next, table.unpack(result, 1, result.n))) -- the request was for an event + end + end, close = function(self) self.closed = true if coroutine.status(self.next) == "suspended" then - coroutine.resume(self.next) + self:continue() end self.redirect = {} end, @@ -69,20 +95,7 @@ local pipe_stream = return self.redirect[1]:write(value) elseif not self.closed then self.buffer = self.buffer .. value - local result = table.pack(coroutine.resume(self.next)) - if coroutine.status(self.next) == "dead" then - self:close() - -- always cause os.exit when the pipe closes - -- this is very important - -- e.g. cat very_large_file | head - -- when head is done, cat should stop - result[1] = nil - end - -- the next pipe - if not result[1] then - os.exit(command_result_as_code(result[2])) - end - return self + return self:continue(true) end os.exit(0) -- abort the current process: SIGPIPE end, @@ -95,7 +108,13 @@ local pipe_stream = -- if that is the case, it is important to leave stream.buffer alone return self.redirect[0]:read(n) elseif self.buffer == "" then - coroutine.yield() + -- the pipe_stream write resume is waiting on this process B (A|B) to yield + -- yield here requests A to output again. However, B may elsewhere want a + -- natural yield (i.e. for events). 
To differentiate this yield from natural + -- yields we set read_mode here, which the pipe_stream write detects + self.read_mode = true + coroutine.yield_all() + self.read_mode = false end local result = string.sub(self.buffer, 1, n) self.buffer = string.sub(self.buffer, n + 1) @@ -108,8 +127,10 @@ function pipe.buildPipeChain(progs) local chain = {} local prev_piped_stream for i=1,#progs do - local prog = progs[i] - local thread = type(prog) == "thread" and prog or pipe.createCoroutineStack(prog).root + local thread = progs[i] + -- A needs to be a stack in case any thread in A call write and then B natural yields + -- B needs to be a stack in case any thread in B calls read + pipe.createCoroutineStack(thread) chain[i] = thread local data = process.info(thread).data local pio = data.io @@ -119,10 +140,6 @@ function pipe.buildPipeChain(progs) local handle = setmetatable({redirect = {rawget(pio, 1)},buffer = ""}, {__index = pipe_stream}) piped_stream = buffer.new("rw", handle) piped_stream:setvbuf("no", 1024) - -- buffer close flushes the buffer, but we have no buffer - -- also, when the buffer is closed, reads and writes don't pass through - -- simply put, we don't want buffer:close - piped_stream.close = function(self) self.stream:close() end pio[1] = piped_stream table.insert(data.handles, piped_stream) end @@ -142,9 +159,12 @@ end local chain_stream = { read = function(self, value) - -- handler is currently on yield_all [else we wouldn't have control here] - local stack_ok, read_ok, ret = self.pco.resume(self.pco.root, value) - return select(stack_ok and read_ok and 2 or 1, nil, ret) + if self.io_stream.closed then return nil end + -- handler is currently on yield all [else we wouldn't have control here] + local read_ok, ret = self.pco.resume(self.pco.root, value) + -- ret can be non string when a process ends + ret = type(ret) == "string" and ret or nil + return select(read_ok and 2 or 1, nil, ret) end, write = function(self, ...) 
return self:read(table.concat({...})) @@ -174,8 +194,9 @@ function pipe.popen(prog, mode, env) -- the stream needs its own process for io local pipe_proc = process.load(function() local n = r and 0 or "" - while true do - n = stream.pco.yield_all(stream.io_stream[key](stream.io_stream, n)) + local ios = stream.io_stream + while not ios.closed do + n = coroutine.yield_all(ios[key](ios, n)) end end, nil, nil, "pipe_handler") @@ -183,17 +204,12 @@ function pipe.popen(prog, mode, env) local cmd_index = r and 1 or 2 local chain = {[cmd_index]=cmd_proc, [pipe_index]=pipe_proc} - -- upgrade coroutine stack - local cmd_stack = pipe.createCoroutineStack(chain[1]) - - -- the processes need to share the coroutine handler to yield the cmd_stack - process.info(chain[2]).data.coroutine_handler = cmd_stack - -- link the cmd and pipe proc io pipe.buildPipeChain(chain) + local cmd_stack = process.info(chain[1]).data.coroutine_handler -- store handle to io_stream from easy access later - stream.io_stream = process.info(cmd_stack.root).data.io[1].stream + stream.io_stream = process.info(chain[1]).data.io[1].stream stream.pco = cmd_stack -- popen commands start out running, like threads From ed99999119272c7feae746363bae567b2f50bd1d Mon Sep 17 00:00:00 2001 From: payonel Date: Wed, 21 Jun 2017 00:45:00 -0700 Subject: [PATCH 7/7] tty needs to load full when loading term library due to meta-metatabling --- .../resources/assets/opencomputers/loot/openos/lib/term.lua | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua b/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua index 3cac78a56..cd1021d21 100644 --- a/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua +++ b/src/main/resources/assets/opencomputers/loot/openos/lib/term.lua @@ -6,7 +6,10 @@ local process = require("process") local kb = require("keyboard") local keys = kb.keys -local term = setmetatable({internal={}}, 
{__index=tty})
+-- tty is bisected into a delay-loaded library
+-- term indexing will fail to use full_tty unless tty is fully loaded
+-- accessing tty.full_tty [a nonexistent field] will cause that full load
+local term = setmetatable({internal={},tty.full_tty}, {__index=tty})
 
 function term.internal.window()
   return process.info().data.window