Merge remote-tracking branch 'origin/master-MC1.9.4' into master-MC1.10

# Conflicts:
#	src/main/scala/li/cil/oc/util/ExtendedArguments.scala
This commit is contained in:
Vexatos 2017-06-21 10:17:31 +02:00
commit 4dd3dcfb4b
20 changed files with 626 additions and 644 deletions

View File

@ -633,9 +633,6 @@ do
local x, y, w, h = getArea() local x, y, w, h = getArea()
local chars = 0 local chars = 0
for line in f:lines() do for line in f:lines() do
if line:sub(-1) == "\r" then
line = line:sub(1, -2)
end
table.insert(buffer, line) table.insert(buffer, line)
chars = chars + unicode.len(line) chars = chars + unicode.len(line)
if #buffer <= h then if #buffer <= h then

View File

@ -0,0 +1,150 @@
local process = require("process")
local unicode = require("unicode")
local event = require("event")
local event_mt = getmetatable(event.handlers)
-- WARNING this code does not use official kernel API and is likely to change
local data = {}
local widths = {}
local sorted = {}
local moved_indexes = {}
local elbow = unicode.char(0x2514)
-- Return a short printable id for a thread: its address with the
-- "thread: 0x" prefix stripped.  When `t` is nil, search process.list for
-- the thread whose process-info entry is `p` and return that thread's id;
-- returns "-" when no owning thread is found.
-- @param t thread|nil  thread to identify
-- @param p table|nil   process info to look up when t is nil
-- @treturn string
local function thread_id(t, p)
  if t then
    -- Parenthesized: gsub also returns the substitution count, which must
    -- not leak to callers as an accidental second return value.
    return (tostring(t):gsub("^thread: 0x", ""))
  end
  -- find the thread that owns process info `p`
  for thread, info in pairs(process.list) do
    if info == p then
      return thread_id(thread)
    end
  end
  return "-"
end
-- Column definitions for the process table: {header, extractor}.
-- Each extractor is called as fn(thread_handle, process_info) and returns
-- the cell text for that process's row.
local cols =
{
  {"PID", thread_id},
  -- count of registered event handlers owned by the process
  {"EVENTS", function(_,p)
    local handlers = {}
    if event_mt.threaded then
      -- per-process handler table (non-public kernel detail, see WARNING above)
      handlers = rawget(p.data, "handlers") or {}
    elseif not p.parent then
      -- single global handler table: attribute it to the root process only
      handlers = event.handlers
    end
    local count = 0
    for _ in pairs(handlers) do
      count = count + 1
    end
    return count == 0 and "-" or tostring(count)
  end},
  {"THREADS", function(_,p)
    -- threads are handles with mt.close
    local count = 0
    for _,h in pairs(p.data.handles or {}) do
      local mt = getmetatable(h)
      if mt and mt.__index and mt.__index.close then
        -- h is the thread-handle manager; its array part holds the threads
        count = count + #h
        break -- there is only one thread handle manager
      end
    end
    return count == 0 and "-" or tostring(count)
  end},
  -- id of the owning process: the process whose thread-handle manager
  -- contains a handle whose mt.process is this process, else the parent
  {"PARENT", function(_,p)
    for _,process_info in pairs(process.list) do
      for _,handle in pairs(process_info.data.handles or {}) do
        local mt = getmetatable(handle)
        if mt and mt.__index and mt.__index.close then
          for _,ht in ipairs(handle) do
            local ht_mt = getmetatable(ht)
            if ht_mt.process == p then
              return thread_id(nil,process_info)
            end
          end
          break -- only one thread handle manager per process
        end
      end
    end
    return thread_id(nil,p.parent)
  end},
  {"CMD", function(_,p) return p.command end},
}
-- Append `value` to the column named `key`, creating the column on first
-- use, and widen that column's tracked (byte) width when needed.
local function add_field(key, value)
  local column = data[key]
  if column == nil then
    column = {}
    data[key] = column
  end
  column[#column + 1] = value
  local width = widths[key] or 0
  if #value > width then
    width = #value
  end
  widths[key] = width
end
-- Seed every column with its own header label as the first row.
for _,key in ipairs(cols) do
  add_field(key[1], key[1])
end
-- One row per live process: evaluate each column's extractor.
for thread_handle, process_info in pairs(process.list) do
  for _,key in ipairs(cols) do
    add_field(key[1], key[2](thread_handle, process_info))
  end
end
-- Locate the PARENT column; it drives the tree sort below and is removed
-- from the visible output afterwards.
local parent_index
for index,set in ipairs(cols) do
  if set[1] == "PARENT" then
    parent_index = index
    break
  end
end
assert(parent_index, "did not find a parent column")
-- Copy row `index` out of the column-oriented `data` tables into the
-- row-oriented `sorted` list, at most once per index.
-- Returns true when the row was moved, false when it was already moved.
local function move_to_sorted(index)
  if not moved_indexes[index] then
    local row = {}
    for column_name, column in pairs(data) do
      row[column_name] = column[index]
    end
    table.insert(sorted, row)
    moved_indexes[index] = true
    return true
  end
  return false
end
-- Build the tree-drawing prefix for a row at `depth`: indentation spaces
-- followed by the elbow glyph; depth 0 (a root row) gets no prefix.
local function make_elbow(depth)
  if depth <= 0 then
    return ""
  end
  return string.rep(" ", depth - 1) .. elbow
end
-- remove COLUMN labels to simplify sort
move_to_sorted(1)
-- Depth-first walk of the process tree: every row whose PARENT column
-- equals `parent` gets the elbow prefix on its CMD cell, is moved into
-- `sorted`, and is recursed into so children print under their parent.
local function update_family(parent, depth)
  depth = depth or 0
  parent = parent or "-" -- top-level processes report "-" as their parent
  for index in ipairs(data.PID) do
    local this_parent = data[cols[parent_index][1]][index]
    if this_parent == parent then
      local dash_cmd = make_elbow(depth) .. data.CMD[index]
      data.CMD[index] = dash_cmd
      -- widths are byte lengths, matching string.format's "%-Ns" padding
      widths.CMD = math.max(widths.CMD or 0, #dash_cmd)
      if move_to_sorted(index) then
        update_family(data.PID[index], depth + 1)
      end
    end
  end
end
update_family()
table.remove(cols, parent_index) -- don't show parent id
-- Render each row: columns left-justified to their tracked widths,
-- separated by two spaces, one row per line.
for _, row in ipairs(sorted) do
  local cells = {}
  for _, col in ipairs(cols) do
    local label = col[1]
    cells[#cells + 1] = string.format("%-" .. widths[label] .. "s", row[label])
  end
  io.write(table.concat(cells, "  "))
  print()
end

View File

@ -3,25 +3,20 @@ local process = require("process")
--Initialize coroutine library-- --Initialize coroutine library--
local _coroutine = coroutine -- real coroutine backend local _coroutine = coroutine -- real coroutine backend
_G.coroutine = {} _G.coroutine = setmetatable(
package.loaded.coroutine = _G.coroutine {
resume = function(co, ...)
for key,value in pairs(_coroutine) do return assert(process.info(co), "thread has no proc").data.coroutine_handler.resume(co, ...)
if type(value) == "function" and value ~= "running" and value ~= "create" then
_G.coroutine[key] = function(...)
local thread = _coroutine.running()
local info = process.info(thread)
-- note the gc thread does not have a process info
assert(info,"process not found for " .. tostring(thread))
local data = info.data
local co = data.coroutine_handler
local handler = co[key]
return handler(...)
end end
else },
_G.coroutine[key] = value {
end __index = function(_, key)
end return assert(process.info(_coroutine.running()), "thread has no proc").data.coroutine_handler[key]
end
}
)
package.loaded.coroutine = _G.coroutine
local kernel_load = _G.load local kernel_load = _G.load
local intercept_load local intercept_load
@ -72,7 +67,7 @@ process.list[init_thread] = {
{ {
vars={}, vars={},
io={}, --init will populate this io={}, --init will populate this
coroutine_handler=setmetatable({}, {__index=_coroutine}) coroutine_handler = _coroutine
}, },
instances = setmetatable({}, {__mode="v"}) instances = setmetatable({}, {__mode="v"})
} }

View File

@ -7,7 +7,7 @@ local lastInterrupt = -math.huge
event.handlers = handlers event.handlers = handlers
function event.register(key, callback, interval, times) function event.register(key, callback, interval, times, opt_handlers)
local handler = local handler =
{ {
key = key, key = key,
@ -17,26 +17,17 @@ function event.register(key, callback, interval, times)
} }
handler.timeout = computer.uptime() + handler.interval handler.timeout = computer.uptime() + handler.interval
opt_handlers = opt_handlers or handlers
local id = 0 local id = 0
repeat repeat
id = id + 1 id = id + 1
until not handlers[id] until not opt_handlers[id]
handlers[id] = handler opt_handlers[id] = handler
return id return id
end end
local function time_to_nearest()
local timeout = math.huge
for _,handler in pairs(handlers) do
if timeout > handler.timeout then
timeout = handler.timeout
end
end
return timeout
end
local _pullSignal = computer.pullSignal local _pullSignal = computer.pullSignal
setmetatable(handlers, {__call=function(_,...)return _pullSignal(...)end}) setmetatable(handlers, {__call=function(_,...)return _pullSignal(...)end})
computer.pullSignal = function(...) -- dispatch computer.pullSignal = function(...) -- dispatch
@ -202,7 +193,10 @@ function event.pullFiltered(...)
local deadline = seconds and (computer.uptime() + seconds) or math.huge local deadline = seconds and (computer.uptime() + seconds) or math.huge
repeat repeat
local closest = math.min(deadline, time_to_nearest()) local closest = deadline
for _,handler in pairs(handlers) do
closest = math.min(handler.timeout, closest)
end
local signal = table.pack(computer.pullSignal(closest - computer.uptime())) local signal = table.pack(computer.pullSignal(closest - computer.uptime()))
if signal.n > 0 then if signal.n > 0 then
if not (seconds or filter) or filter == nil or filter(table.unpack(signal, 1, signal.n)) then if not (seconds or filter) or filter == nil or filter(table.unpack(signal, 1, signal.n)) then

View File

@ -77,7 +77,7 @@ function io.error(file)
end end
function io.popen(prog, mode, env) function io.popen(prog, mode, env)
return require("pipes").popen(prog, mode, env) return require("pipe").popen(prog, mode, env)
end end
function io.read(...) function io.read(...)

View File

@ -0,0 +1,223 @@
local process = require("process")
local shell = require("shell")
local buffer = require("buffer")
local command_result_as_code = require("sh").internal.command_result_as_code
-- Module table for the pipe library.
local pipe = {}
-- Coroutine handler of the process loading this library; it is the base
-- (__index fallback) for every coroutine stack created below.
local _root_co = assert(process.info(), "process metadata failed to load").data.coroutine_handler
-- root can be a coroutine or a function
-- Give `root` (or a new process loaded from it) its own coroutine handler
-- whose yields are tagged: a plain pco.yield is tagged nil and returns to
-- its resumer, while pco.yield_all is tagged true and is forwarded up the
-- stack until it leaves the pipeline (a "natural" yield, e.g. for events).
-- @param root thread|function  process root thread, or a function to load as one
-- @param env table|nil         environment for process.load when root is a function
-- @param name string|nil       process name, defaults to "pipe"
-- @treturn table               the new coroutine handler (pco)
function pipe.createCoroutineStack(root, env, name)
  checkArg(1, root, "thread", "function")
  if type(root) == "function" then
    root = assert(process.load(root, env, nil, name or "pipe"), "failed to load proc data for given function")
  end
  local proc = assert(process.list[root], "coroutine must be a process thread else the parent process is corrupted")
  local pco = setmetatable({root=root}, {__index=_root_co})
  proc.data.coroutine_handler = pco
  -- plain yield: tag nil, control returns to the direct resumer
  function pco.yield(...)
    return _root_co.yield(nil, ...)
  end
  -- natural yield: tag true, pco.resume forwards it up the stack
  function pco.yield_all(...)
    return _root_co.yield(true, ...)
  end
  function pco.resume(co, ...)
    checkArg(1, co, "thread")
    local args = table.pack(...)
    while true do -- for consecutive sysyields
      local result = table.pack(_root_co.resume(co, table.unpack(args, 1, args.n)))
      if not result[1] or _root_co.status(co) == "dead" then
        -- resume failed or co finished: propagate resume's results as-is
        return table.unpack(result, 1, result.n)
      elseif result[2] and pco.root ~= co then
        -- yield_all from below the stack root: forward upward, then feed
        -- whatever comes back into the next resume of co
        args = table.pack(_root_co.yield(table.unpack(result, 2, result.n)))
      else
        -- plain yield (or yield_all at the root): strip the tag and return
        return true, table.unpack(result, 3, result.n)
      end
    end
  end
  return pco
end
-- Raw stream placed between two piped processes A | B.  `self.next` is
-- B's root thread and `self.buffer` holds bytes written by A that B has
-- not read yet.  `self.redirect[0]`/`[1]` carry the original stdin/stdout.
local pipe_stream =
{
  -- Resume B until it blocks reading this pipe, dies, or makes a natural
  -- yield (which is forwarded upward and then fed back into B).  With
  -- `exit` set, a dead/crashed B terminates the current process too.
  continue = function(self, exit)
    local result = table.pack(coroutine.resume(self.next))
    while true do -- repeat resumes if B (A|B) makes a natural yield
      -- if B crashed or closed in the last resume
      -- then we can close the stream
      if coroutine.status(self.next) == "dead" then
        self:close()
        -- always cause os.exit when the pipe closes
        -- this is very important
        -- e.g. cat very_large_file | head
        -- when head is done, cat should stop
        result[1] = nil
      end
      -- the pipe closed or crashed
      if not result[1] then
        if exit then
          os.exit(command_result_as_code(result[2]))
        end
        return self
      end
      -- next is suspended, read_mode indicates why
      if self.read_mode then
        -- B wants A to write again, resume A
        return self
      end
      -- not reading, it is requesting a yield
      result = table.pack(coroutine.yield_all(table.unpack(result, 2, result.n)))
      result = table.pack(coroutine.resume(self.next, table.unpack(result, 1, result.n))) -- the request was for an event
    end
  end,
  -- Mark the stream closed, give a suspended B one last resume so it can
  -- observe eof, then drop the io redirections.
  close = function(self)
    self.closed = true
    if coroutine.status(self.next) == "suspended" then
      self:continue()
    end
    self.redirect = {}
  end,
  -- pipes are not seekable
  seek = function()
    return nil, "bad file descriptor"
  end,
  -- Append `value` to the buffer and hand control to B.  Writing to a
  -- closed pipe aborts the writing process (SIGPIPE-like), complaining
  -- on stderr only if B is still alive.
  write = function(self, value)
    if not self.redirect[1] and self.closed then
      -- if next is dead, ignore all writes
      if coroutine.status(self.next) ~= "dead" then
        io.stderr:write("attempt to use a closed stream\n")
        os.exit(1)
      end
    elseif self.redirect[1] then
      return self.redirect[1]:write(value)
    elseif not self.closed then
      self.buffer = self.buffer .. value
      return self:continue(true)
    end
    os.exit(0) -- abort the current process: SIGPIPE
  end,
  -- Read up to n bytes from the buffer, yielding back to A when the
  -- buffer is empty.  Returns nil at eof (closed pipe).
  read = function(self, n)
    if self.closed then
      return nil -- eof
    end
    if self.redirect[0] then
      -- popen could be using this code path
      -- if that is the case, it is important to leave stream.buffer alone
      return self.redirect[0]:read(n)
    elseif self.buffer == "" then
      -- the pipe_stream write resume is waiting on this process B (A|B) to yield
      -- yield here requests A to output again. However, B may elsewhere want a
      -- natural yield (i.e. for events). To differentiate this yield from natural
      -- yields we set read_mode here, which the pipe_stream write detects
      self.read_mode = true
      coroutine.yield_all()
      self.read_mode = false
    end
    local result = string.sub(self.buffer, 1, n)
    self.buffer = string.sub(self.buffer, n + 1)
    return result
  end
}
-- prog1 | prog2 | ... | progn
-- Wire the io of an ordered list of process threads so that each
-- process's stdout feeds the next process's stdin through a pipe_stream.
-- @param progs table  array of process threads in pipeline order
-- @treturn table      the same threads, as the chain
function pipe.buildPipeChain(progs)
  local chain = {}
  local prev_piped_stream
  for i=1,#progs do
    local thread = progs[i]
    -- A needs to be a stack in case any thread in A calls write and then B natural yields
    -- B needs to be a stack in case any thread in B calls read
    pipe.createCoroutineStack(thread)
    chain[i] = thread
    local data = process.info(thread).data
    local pio = data.io
    local piped_stream
    if i < #progs then
      -- every process but the last writes into a fresh pipe; the original
      -- stdout is kept as redirect[1] for pass-through writes
      local handle = setmetatable({redirect = {rawget(pio, 1)},buffer = ""}, {__index = pipe_stream})
      piped_stream = buffer.new("rw", handle)
      piped_stream:setvbuf("no", 1024)
      pio[1] = piped_stream
      table.insert(data.handles, piped_stream)
    end
    if prev_piped_stream then
      -- this process reads the previous process's pipe as its stdin
      prev_piped_stream.stream.redirect[0] = rawget(pio, 0)
      prev_piped_stream.stream.next = thread
      pio[0] = prev_piped_stream
    end
    prev_piped_stream = piped_stream
  end
  return chain
end
-- Controller stream returned by pipe.popen: each read/write resumes the
-- command's coroutine stack and shuttles one chunk of data across it.
local chain_stream =
{
  read = function(self, value)
    if self.io_stream.closed then return nil end
    -- handler is currently on yield all [else we wouldn't have control here]
    local read_ok, ret = self.pco.resume(self.pco.root, value)
    -- ret can be non string when a process ends
    ret = type(ret) == "string" and ret or nil
    -- success: select(2, nil, ret) -> ret; failure: select(1, ...) -> nil, ret
    return select(read_ok and 2 or 1, nil, ret)
  end,
  -- writes reuse the read path: the concatenated data becomes the value
  -- handed to the pipe handler process on resume
  write = function(self, ...)
    return self:read(table.concat({...}))
  end,
  close = function(self)
    self.io_stream:close()
  end,
}
-- Run `prog` in its own process and return a buffered stream connected to
-- its stdout (mode "r") or stdin (mode "w"), in the manner of io.popen.
-- @param prog string     command line, executed via shell.execute
-- @param mode string|nil "r" (default) or "w"
-- @param env table|nil   environment passed to shell.execute
-- @treturn table         buffered stream wrapping a chain_stream controller
function pipe.popen(prog, mode, env)
  mode = mode or "r"
  if mode ~= "r" and mode ~= "w" then
    return nil, "bad argument #2: invalid mode " .. tostring(mode) .. " must be r or w"
  end
  local r = mode == "r"
  local key = r and "read" or "write"
  -- to simplify the code - shell.execute is run within a function to pass (prog, env)
  -- if cmd_proc were to come second (mode=="w") then the pipe_proc would have to pass
  -- the starting args. which is possible, just more complicated
  local cmd_proc = process.load(function() return shell.execute(prog, env) end, nil, nil, prog)
  -- the chain stream is the popen controller
  local stream = setmetatable({}, { __index = chain_stream })
  -- the stream needs its own process for io
  local pipe_proc = process.load(function()
    local n = r and 0 or ""
    local ios = stream.io_stream
    while not ios.closed do
      n = coroutine.yield_all(ios[key](ios, n))
    end
  end, nil, nil, "pipe_handler")
  -- reading: cmd writes first, handler reads; writing: handler feeds cmd
  local pipe_index = r and 2 or 1
  local cmd_index = r and 1 or 2
  local chain = {[cmd_index]=cmd_proc, [pipe_index]=pipe_proc}
  -- link the cmd and pipe proc io
  pipe.buildPipeChain(chain)
  local cmd_stack = process.info(chain[1]).data.coroutine_handler
  -- store handle to io_stream for easy access later
  stream.io_stream = process.info(chain[1]).data.io[1].stream
  stream.pco = cmd_stack
  -- popen commands start out running, like threads
  cmd_stack.resume(cmd_stack.root)
  local buffered_stream = buffer.new(mode, stream)
  buffered_stream:setvbuf("no", 1024)
  return buffered_stream
end
return pipe

View File

@ -1,372 +0,0 @@
local tx = require("transforms")
local shell = require("shell")
local sh = require("sh")
local process = require("process")
local pipes = {}
local pipeStream = {}
local function bfd() return nil, "bad file descriptor" end
function pipeStream.new(pm)
local stream = {pm=pm}
local metatable = {__index = pipeStream}
return setmetatable(stream, metatable)
end
function pipeStream:resume()
local yield_args = table.pack(self.pm.pco.resume_all())
if not yield_args[1] then
self.pm.dead = true
if not yield_args[1] and yield_args[2] then
io.stderr:write(tostring(yield_args[2]) .. "\n")
end
end
return table.unpack(yield_args)
end
function pipeStream:close()
if self.pm.closed then -- already closed
return
end
self.pm.closed = true
-- if our pco stack is empty, we've already run fully
if self.pm.pco.top() == nil then
return
end
-- if a thread aborted we have set dead true
if self.pm.dead then
return
end
-- run prog until dead
local co = self.pm.pco.previous_handler
local pco_root = self.pm.threads[1]
if co.status(pco_root) == "dead" then
-- I would have liked the pco stack to unwind itself for dead coroutines
-- maybe I haven't handled aborts correctly
return
end
return self:resume(true)
end
function pipeStream:read(n)
local pm = self.pm
if pm.closed then
return bfd()
end
if pm:buffer() == '' and not pm.dead then
local result = table.pack(self:resume())
if not result[1] then
-- resume can fail if p1 crashes
self:close()
return nil, "pipe closed unexpectedly"
elseif result.n > 1 and not result[2] then
return result[2], result[3]
end
end
local result = pm:buffer(n)
if result == '' and pm.dead and n > 0 then
return nil -- eof
end
return result
end
pipeStream.seek = bfd
function pipeStream:write(v)
local pm = self.pm
if pm.closed or pm.dead then
-- if prog is dead, ignore all writes
if pm.pco.previous_handler.status(pm.threads[pm.self_id]) ~= "dead" then
error("attempt to use a closed stream")
end
return bfd()
end
pm:buffer(pm:buffer() .. v)
-- allow handler to push write event
local result = table.pack(self:resume())
if not result[1] then
-- resume can fail if p1 crashes
pm.dead = true
self:close()
return nil, "pipe closed unexpectedly"
end
return self
end
function pipes.createCoroutineStack(fp, init, name)
local _co = process.info().data.coroutine_handler
local pco = setmetatable(
{
stack = {},
next = false,
create = _co.create,
wrap = _co.wrap,
previous_handler = _co
}, {__index=_co})
function pco.top()
return pco.stack[#pco.stack]
end
function pco.yield(...)
-- pop last
pco.set_unwind(pco.running())
return _co.yield(...)
end
function pco.index_of(thread)
for i,t in ipairs(pco.stack) do
if t == thread then
return i
end
end
end
function pco.yield_all(...)
local current = pco.running()
local existing_index = pco.index_of(current)
assert(existing_index, "cannot yield inactive stack")
pco.next = current
return _co.yield(...)
end
function pco.set_unwind(from)
pco.next = false
if from then
local index = pco.index_of(from)
if index then
pco.stack = tx.sub(pco.stack, 1, index-1)
pco.next = pco.stack[index-1] or false
end
end
end
function pco.resume_all(...)
local base = pco.stack[1]
local top = pco.top()
if type(base) ~= "thread" or _co.status(base) ~= "suspended" or
type(top) ~= "thread" or _co.status(top) ~= "suspended" then
return false
end
local status, result = pcall(function(...)
local _result = table.pack(pco.resume(top, ...))
return _result
end,...)
if not status then
return nil, result
end
return table.unpack(result)
end
function pco.resume(thread, ...)
checkArg(1, thread, "thread")
local status = pco.status(thread)
if status ~= "suspended" then
local msg = string.format("cannot resume %s coroutine",
status == "dead" and "dead" or "non-suspended")
return false, msg
end
local current_index = pco.index_of(pco.running())
local existing_index = pco.index_of(thread)
if not existing_index then
assert(current_index, "pco coroutines cannot resume threads outside the stack")
pco.stack = tx.concat(tx.sub(pco.stack, 1, current_index), {thread})
end
if current_index then
-- current should be waiting for yield
pco.next = thread
local t = table.pack(_co.yield(...)) -- pass args to resume next
return pco.last == nil and true or pco.last, table.unpack(t,1,t.n)
else
-- the stack is not running
pco.next = false
local yield_args = table.pack(_co.resume(thread, ...))
if #pco.stack > 0 then
-- thread may have crashed (crash unwinds as well)
-- or we don't have next lined up (unwind)
if not pco.next or not yield_args[1] then
-- unwind from current index, not top
pco.set_unwind(thread)
end
-- if next is current thread, yield_all is active
-- in such a case, yield out first, then resume where we left off
if pco.next and pco.next ~= thread then
local next = pco.next
pco.next = false
pco.last = yield_args[1]
return pco.resume(next, table.unpack(yield_args,2,yield_args.n))
end
end
return table.unpack(yield_args)
end
end
function pco.status(thread)
checkArg(1, thread, "thread")
local current_index = pco.index_of(pco.running())
local existing_index = pco.index_of(thread)
if current_index and existing_index and existing_index < current_index then
local current = pco.stack[current_index]
if current and _co.status(current) == "running" then
return "normal"
end
end
return _co.status(thread)
end
if fp then
pco.stack = {process.load(fp,nil,init,name or "pco root")}
process.info(pco.stack[1]).data.coroutine_handler = pco
end
return pco
end
local pipeManager = {}
function pipeManager.reader(pm,...)
while pm.pco.status(pm.threads[pm.prog_id]) ~= "dead" do
pm.pco.yield_all()
-- kick back to main thread, true to kick back one further
if pm.closed then break end
-- if we are a reader pipe, we leave the buffer alone and yield to previous
if pm.pco.status(pm.threads[pm.prog_id]) ~= "dead" then
pm.pco.yield(...)
end
end
pm.dead = true
end
function pipeManager:buffer(value)
-- if value but no stream, buffer for buffer
local s = self and self.pipe and self.pipe.stream
if not s then
if type(value) == "string" or self.prewrite then
self.prewrite = self.prewrite or {}
s = self.prewrite -- s.buffer will be self.prewrite.buffer
else
return ''
end
elseif self.prewrite then -- we stored, previously, a prewrite buffer
s.buffer = self.prewrite.buffer .. s.buffer
self.prewrite = nil
end
if type(value) == "string" then
s.buffer = value
return value
elseif type(value) ~= "number" then
return s.buffer -- don't truncate
end
local result = string.sub(s.buffer, 1, value)
s.buffer = string.sub(s.buffer, value + 1)
return result
end
function pipeManager:redirectRead()
local reader = {pm=self}
function reader:read(n)
local pm = self.pm
local pco = pm.pco
-- if we have any buffer, return it first
if pm:buffer() == '' and not pm.closed and not pm.dead then
pco.yield_all()
end
if pm.closed or pm.dead then
return nil
end
return pm:buffer(n)
end
return reader
end
function pipes.createPipe(prog, mode, env)
mode = mode or "r"
if mode ~= "r" and mode ~= "w" then
return nil, "bad argument #2: invalid mode " .. tostring(mode) .. " must be r or w"
end
local shellPath, reason = shell.resolve(os.getenv("SHELL") or "/bin/sh", "lua")
if not shellPath then
return nil, reason
end
local pm = setmetatable(
{dead=false,closed=false,prog=prog,mode=mode,env=env},
{__index=pipeManager}
)
pm.prog_id = pm.mode == "r" and 1 or 2
pm.self_id = pm.mode == "r" and 2 or 1
pm.handler = pm.mode == "r" and
function()return pipeManager.reader(pm)end or
function()pm.dead=true end
pm.commands = {}
pm.commands[pm.prog_id] = {shellPath, {}}
pm.commands[pm.self_id] = {pm.handler, {}}
pm.root = function()
local reason
pm.threads, reason = sh.internal.createThreads(pm.commands, pm.env, {[pm.prog_id]=table.pack(pm.env,pm.prog)})
if not pm.threads then
pm.dead = true
return false, reason
end
pm.pipe = process.info(pm.threads[1]).data.io[1]
-- if we are the writer, we need args to resume prog
if pm.mode == "w" then
pm.pipe.stream.redirect[0] = pm:redirectRead()
end
return sh.internal.runThreads(pm.threads)
end
pm.pco = pipes.createCoroutineStack(pm.root)
return pipeStream.new(pm)
end
function pipes.popen(prog, mode, env)
checkArg(1, prog, "string")
checkArg(2, mode, "string", "nil")
checkArg(3, env, "table", "nil")
local pipe, reason = pipes.createPipe(prog, mode, env)
if not pipe then
return false, reason
end
-- pipe file descriptor
local pfd = require("buffer").new(mode, pipe)
pfd:setvbuf("no", 0) -- 2nd are to read chunk size
-- popen processes start on create (like a thread)
pfd.stream:resume()
return pfd
end
return pipes

View File

@ -68,7 +68,7 @@ function process.load(path, env, init, name)
-- msg can be a custom error object -- msg can be a custom error object
if type(msg) == "table" then if type(msg) == "table" then
if msg.reason ~= "terminated" then if msg.reason ~= "terminated" then
io.stderr:write(msg.reason.."\n") io.stderr:write(tostring(msg.reason), "\n")
end end
return msg.code or 0 return msg.code or 0
end end
@ -104,14 +104,8 @@ function process.load(path, env, init, name)
return thread return thread
end end
function process.running(level) -- kept for backwards compat, prefer process.info
local info = process.info(level)
if info then
return info.path, info.env, info.command
end
end
function process.info(levelOrThread) function process.info(levelOrThread)
checkArg(1, levelOrThread, "thread", "number", "nil")
local p local p
if type(levelOrThread) == "thread" then if type(levelOrThread) == "thread" then
p = process.findProcess(levelOrThread) p = process.findProcess(levelOrThread)
@ -141,4 +135,19 @@ function process.internal.close(thread, result)
process.list[thread] = nil process.list[thread] = nil
end end
function process.internal.continue(co, ...)
local result = {}
-- Emulate CC behavior by making yields a filtered event.pull()
local args = table.pack(...)
while coroutine.status(co) ~= "dead" do
result = table.pack(coroutine.resume(co, table.unpack(args, 1, args.n)))
if coroutine.status(co) ~= "dead" then
args = table.pack(coroutine.yield(table.unpack(result, 2, result.n)))
elseif not result[1] then
io.stderr:write(result[2])
end
end
return table.unpack(result, 2, result.n)
end
return process return process

View File

@ -180,29 +180,12 @@ function sh.internal.createThreads(commands, env, start_args)
end end
if #threads > 1 then if #threads > 1 then
sh.internal.buildPipeChain(threads) require("pipe").buildPipeChain(threads)
end end
return threads return threads
end end
function sh.internal.runThreads(threads)
local result = {}
for i = 1, #threads do
-- Emulate CC behavior by making yields a filtered event.pull()
local thread, args = threads[i], {}
while coroutine.status(thread) ~= "dead" do
result = table.pack(coroutine.resume(thread, table.unpack(args)))
if coroutine.status(thread) ~= "dead" then
args = table.pack(coroutine.yield(table.unpack(result, 2, result.n)))
elseif not result[1] then
io.stderr:write(result[2])
end
end
end
return result[2]
end
function sh.internal.executePipes(pipe_parts, eargs, env) function sh.internal.executePipes(pipe_parts, eargs, env)
local commands = {} local commands = {}
for _,words in ipairs(pipe_parts) do for _,words in ipairs(pipe_parts) do
@ -260,7 +243,7 @@ function sh.internal.executePipes(pipe_parts, eargs, env)
local threads, reason = sh.internal.createThreads(commands, env, {[#commands]=eargs}) local threads, reason = sh.internal.createThreads(commands, env, {[#commands]=eargs})
if not threads then return false, reason end if not threads then return false, reason end
return sh.internal.runThreads(threads) return process.internal.continue(threads[1])
end end
function sh.execute(env, command, ...) function sh.execute(env, command, ...)

View File

@ -124,16 +124,9 @@ function shell.execute(command, env, ...)
if not sh then if not sh then
return false, reason return false, reason
end end
local result = table.pack(coroutine.resume(process.load(function(...) local proc = process.load(sh, nil, nil, command)
return sh(...) local result = table.pack(process.internal.continue(proc, env, command, ...))
end), env, command, ...)) if result.n == 0 then return true end
if not result[1] and type(result[2]) == "table" and result[2].reason == "terminated" then
if result[2].code then
return true
else
return false, "terminated"
end
end
return table.unpack(result, 1, result.n) return table.unpack(result, 1, result.n)
end end

View File

@ -6,7 +6,10 @@ local process = require("process")
local kb = require("keyboard") local kb = require("keyboard")
local keys = kb.keys local keys = kb.keys
local term = setmetatable({internal={}}, {__index=tty}) -- tty is bisected into a delay loaded library
-- term indexing will fail to use full_tty unless tty is fully loaded
-- accessing tty.full_tty [a nonexistent field] will cause that full load
local term = setmetatable({internal={},tty.full_tty}, {__index=tty})
function term.internal.window() function term.internal.window()
return process.info().data.window return process.info().data.window
@ -181,7 +184,7 @@ end
function term.read(history, dobreak, hint, pwchar, filter) function term.read(history, dobreak, hint, pwchar, filter)
if not io.stdin.tty then if not io.stdin.tty then
return io.read() return io.read("*L")
end end
history = history or {} history = history or {}
local handler = history local handler = history

View File

@ -1,9 +1,10 @@
local pipes = require("pipes") local pipe = require("pipe")
local event = require("event") local event = require("event")
local process = require("process") local process = require("process")
local computer = require("computer") local computer = require("computer")
local thread = {} local thread = {}
local init_thread
local function waitForDeath(threads, timeout, all) local function waitForDeath(threads, timeout, all)
checkArg(1, threads, "table") checkArg(1, threads, "table")
@ -15,9 +16,10 @@ local function waitForDeath(threads, timeout, all)
local deadline = computer.uptime() + timeout local deadline = computer.uptime() + timeout
while deadline > computer.uptime() do while deadline > computer.uptime() do
local dieing = {} local dieing = {}
local living = {} local living = false
for _,t in ipairs(threads) do for _,t in ipairs(threads) do
local result = t.process and t.process.data.result local mt = getmetatable(t)
local result = mt.attached.data.result
local proc_ok = type(result) ~= "table" or result[1] local proc_ok = type(result) ~= "table" or result[1]
local ready_to_die = t:status() ~= "running" -- suspended is considered dead to exit local ready_to_die = t:status() ~= "running" -- suspended is considered dead to exit
or not proc_ok -- the thread is killed if its attached process has a non zero exit or not proc_ok -- the thread is killed if its attached process has a non zero exit
@ -25,18 +27,18 @@ local function waitForDeath(threads, timeout, all)
dieing[#dieing + 1] = t dieing[#dieing + 1] = t
mortician[t] = true mortician[t] = true
else else
living[#living + 1] = t living = true
end end
end end
if all and #living == 0 or not all and #dieing > 0 then if all and not living or not all and #dieing > 0 then
timed_out = false timed_out = false
break break
end end
-- resume each non dead thread -- resume each non dead thread
-- we KNOW all threads are event.pull blocked -- we KNOW all threads are event.pull blocked
event.pull() event.pull(deadline - computer.uptime())
end end
for t in pairs(mortician) do for t in pairs(mortician) do
@ -58,8 +60,7 @@ function thread.waitForAll(threads, timeout)
end end
local box_thread = {} local box_thread = {}
local box_thread_handle = {} local box_thread_handle = {close = thread.waitForAll}
box_thread_handle.close = thread.waitForAll
local function get_box_thread_handle(handles, bCreate) local function get_box_thread_handle(handles, bCreate)
for _,next_handle in ipairs(handles) do for _,next_handle in ipairs(handles) do
@ -94,57 +95,61 @@ function box_thread:status()
end end
function box_thread:join(timeout) function box_thread:join(timeout)
self:detach() return waitForDeath({self}, timeout, true)
return box_thread_handle.close({self}, timeout)
end end
function box_thread:kill() function box_thread:kill()
self:detach() getmetatable(self).close()
if self:status() == "dead" then
return
end
getmetatable(self).__status = "dead"
self.pco.stack = {}
end end
function box_thread:detach() function box_thread:detach()
if not self.process then return self:attach(init_thread)
return
end
local handles = self.process.data.handles
local btHandle = get_box_thread_handle(handles)
if not btHandle then
return nil, "thread failed to detach, process had no thread handle"
end
for i,h in ipairs(btHandle) do
if h == self then
return table.remove(btHandle, i)
end
end
return nil, "thread not found in parent process"
end end
function box_thread:attach(parent) function box_thread:attach(parent)
checkArg(1, parent, "thread", "number", "nil") checkArg(1, parent, "thread", "number", "nil")
local mt = assert(getmetatable(self), "thread panic: no metadata")
local proc = process.info(parent) local proc = process.info(parent)
if not proc then return nil, "thread failed to attach, process not found" end if not proc then return nil, "thread failed to attach, process not found" end
self:detach() if mt.attached == proc then return self end -- already attached
local waiting_handler
if mt.attached then
local prev_btHandle = assert(get_box_thread_handle(mt.attached.data.handles), "thread panic: no thread handle")
for i,h in ipairs(prev_btHandle) do
if h == self then
table.remove(prev_btHandle, i)
if mt.id then
waiting_handler = assert(mt.attached.data.handlers[mt.id], "thread panic: no event handler")
mt.attached.data.handlers[mt.id] = nil
end
break
end
end
end
-- attach to parent or the current process -- attach to parent or the current process
self.process = proc mt.attached = proc
local handles = self.process.data.handles local handles = proc.data.handles
-- this process may not have a box_thread manager handle -- this process may not have a box_thread manager handle
local btHandle = get_box_thread_handle(handles, true) local btHandle = get_box_thread_handle(handles, true)
table.insert(btHandle, self) table.insert(btHandle, self)
return true
if waiting_handler then -- event-waiting
mt.register(waiting_handler.timeout - computer.uptime())
end
return self
end end
function thread.create(fp, ...) function thread.create(fp, ...)
checkArg(1, fp, "function") checkArg(1, fp, "function")
local t = setmetatable({}, {__status="suspended",__index=box_thread}) local t = {}
t.pco = pipes.createCoroutineStack(function(...) local mt = {__status="suspended",__index=box_thread}
local mt = getmetatable(t) setmetatable(t, mt)
t.pco = pipe.createCoroutineStack(function(...)
mt.__status = "running" mt.__status = "running"
local fp_co = t.pco.create(fp) local fp_co = t.pco.create(fp)
-- run fp_co until dead -- run fp_co until dead
@ -154,75 +159,132 @@ function thread.create(fp, ...)
while true do while true do
local result = table.pack(t.pco.resume(fp_co, table.unpack(args, 1, args.n))) local result = table.pack(t.pco.resume(fp_co, table.unpack(args, 1, args.n)))
if t.pco.status(fp_co) == "dead" then if t.pco.status(fp_co) == "dead" then
-- this error handling is VERY much like process.lua
-- maybe one day it'll merge
if not result[1] then if not result[1] then
event.onError(string.format("thread crashed: %s", tostring(result[2]))) local exit_code
local msg = result[2]
-- msg can be a custom error object
local reason = "crashed"
if type(msg) == "table" then
if type(msg.reason) == "string" then
reason = msg.reason
end
exit_code = tonumber(msg.code)
elseif type(msg) == "string" then
reason = msg
end
if not exit_code then
pcall(event.onError, string.format("[thread] %s", reason))
exit_code = 1
end
os.exit(exit_code)
end end
break break
end end
args = table.pack(event.pull(table.unpack(result, 2, result.n))) args = table.pack(event.pull(table.unpack(result, 2, result.n)))
end end
mt.__status = "dead" end, nil, "thread")
event.push("thread_exit")
t:detach() --special resume to keep track of process death
end) function mt.private_resume(...)
local handlers = event.handlers mt.id = nil
local handlers_mt = getmetatable(handlers) -- this thread may have been killed
-- the event library sets a metatable on handlers if t:status() == "dead" then return end
-- but not a pull field local result = table.pack(t.pco.resume(t.pco.root, ...))
if not handlers_mt.pull then if t.pco.status(t.pco.root) == "dead" then
-- if we don't separate root handlers from thread handlers we see double dispatch mt.close()
-- because the thread calls dispatch on pull as well
handlers_mt.handlers = {} -- root handlers
handlers_mt.pull = handlers_mt.__call -- the real computer.pullSignal
handlers_mt.current = function(field) return process.info().data[field] or handlers_mt[field] end
while true do
local key, value = next(handlers)
if not key then break end
handlers_mt.handlers[key] = value
handlers[key] = nil
end
handlers_mt.__index = function(_, key)
return handlers_mt.current("handlers")[key]
end
handlers_mt.__newindex = function(_, key, value)
handlers_mt.current("handlers")[key] = value
end
handlers_mt.__pairs = function(_, ...)
return pairs(handlers_mt.current("handlers"), ...)
end
handlers_mt.__call = function(tbl, ...)
return handlers_mt.current("pull")(tbl, ...)
end end
return table.unpack(result, 1, result.n)
end end
local data = process.info(t.pco.stack[1]).data mt.process = process.list[t.pco.root]
data.handlers = {} mt.process.data.handlers = {}
data.pull = function(_, timeout)
-- register a timeout handler
-- hack so that event.register sees the root handlers
local data_handlers = data.handlers
data.handlers = handlers_mt.handlers
event.register(
nil, -- nil key matches anything, timers use false keys
t.pco.resume_all,
timeout, -- wait for the time specified by the caller
1) -- we only want this thread to wake up once
data.handlers = data_handlers
function mt.register(timeout)
-- register a timeout handler
mt.id = event.register(
nil, -- nil key matches anything, timers use false keys
mt.private_resume,
timeout, -- wait for the time specified by the caller
1, -- we only want this thread to wake up once
mt.attached.data.handlers) -- optional arg, to specify our own handlers
end
function mt.process.data.pull(_, timeout)
mt.register(timeout)
-- yield_all will yield this pco stack -- yield_all will yield this pco stack
-- the callback will resume this stack -- the callback will resume this stack
local event_data local event_data
repeat repeat
event_data = table.pack(t.pco.yield_all(timeout)) event_data = table.pack(t.pco.yield_all(timeout))
-- during sleep, we may have been suspended -- during sleep, we may have been suspended
until getmetatable(t).__status ~= "suspended" until t:status() ~= "suspended"
return table.unpack(event_data, 1, event_data.n) return table.unpack(event_data, 1, event_data.n)
end end
t:attach() function mt.close()
t.pco.resume_all(...) -- threads start out running if t:status() == "dead" then
return
end
local htm = get_box_thread_handle(mt.attached.data.handles)
for _,ht in ipairs(htm) do
if ht == t then
table.remove(htm, _)
break
end
end
mt.__status = "dead"
event.push("thread_exit")
end
t:attach() -- the current process
mt.private_resume(...) -- threads start out running
return t return t
end end
do
local handlers = event.handlers
local handlers_mt = getmetatable(handlers)
-- the event library sets a metatable on handlers, but we set threaded=true
if not handlers_mt.threaded then
-- find the root process
local root_data
for t,p in pairs(process.list) do
if not p.parent then
init_thread = t
root_data = p.data
break
end
end
assert(init_thread, "thread library panic: no init thread")
handlers_mt.threaded = true
-- handles might be optimized out for memory
root_data.handles = root_data.handles or {}
-- if we don't separate root handlers from thread handlers we see double dispatch
-- because the thread calls dispatch on pull as well
root_data.handlers = {} -- root handlers
root_data.pull = handlers_mt.__call -- the real computer.pullSignal
while true do
local key, value = next(handlers)
if not key then break end
root_data.handlers[key] = value
handlers[key] = nil
end
handlers_mt.__index = function(_, key)
return process.info().data.handlers[key]
end
handlers_mt.__newindex = function(_, key, value)
process.info().data.handlers[key] = value
end
handlers_mt.__pairs = function(_, ...)
return pairs(process.info().data.handlers, ...)
end
handlers_mt.__call = function(tbl, ...)
return process.info().data.pull(tbl, ...)
end
end
end
return thread return thread

View File

@ -1,7 +1,7 @@
-- called from /init.lua -- called from /init.lua
local raw_loadfile = ... local raw_loadfile = ...
_G._OSVERSION = "OpenOS 1.6.4" _G._OSVERSION = "OpenOS 1.6.5"
local component = component local component = component
local computer = computer local computer = computer
@ -108,7 +108,6 @@ status("Initializing file system...")
-- Mount the ROM and temporary file systems to allow working on the file -- Mount the ROM and temporary file systems to allow working on the file
-- system module from this point on. -- system module from this point on.
require("filesystem").mount(computer.getBootAddress(), "/") require("filesystem").mount(computer.getBootAddress(), "/")
package.preload={}
status("Running boot scripts...") status("Running boot scripts...")

View File

@ -99,36 +99,6 @@ function sh.internal.openCommandRedirects(redirects)
end end
end end
function sh.internal.buildPipeChain(threads)
local prev_pipe
for i=1,#threads do
local thread = threads[i]
local data = process.info(thread).data
local pio = data.io
local pipe
if i < #threads then
pipe = require("buffer").new("rw", sh.internal.newMemoryStream())
pipe:setvbuf("no", 0)
-- buffer close flushes the buffer, but we have no buffer
-- also, when the buffer is closed, read and writes don't pass through
-- simply put, we don't want buffer:close
pipe.close = function(self) self.stream:close() end
pipe.stream.redirect[1] = rawget(pio, 1)
pio[1] = pipe
table.insert(data.handles, pipe)
end
if prev_pipe then
prev_pipe.stream.redirect[0] = rawget(pio, 0)
prev_pipe.stream.next = thread
pio[0] = prev_pipe
end
prev_pipe = pipe
end
end
-- takes an eword, returns a list of glob hits or {word} if no globs exist -- takes an eword, returns a list of glob hits or {word} if no globs exist
function sh.internal.glob(eword) function sh.internal.glob(eword)
-- words are parts, parts are txt and qr -- words are parts, parts are txt and qr
@ -524,67 +494,6 @@ function sh.internal.remove_negation(chain)
return false return false
end end
function sh.internal.newMemoryStream()
local memoryStream = {}
function memoryStream:close()
self.closed = true
self.redirect = {}
end
function memoryStream:seek()
return nil, "bad file descriptor"
end
function memoryStream:read(n)
if self.closed then
return nil -- eof
end
if self.redirect[0] then
-- popen could be using this code path
-- if that is the case, it is important to leave stream.buffer alone
return self.redirect[0]:read(n)
elseif self.buffer == "" then
coroutine.yield()
end
local result = string.sub(self.buffer, 1, n)
self.buffer = string.sub(self.buffer, n + 1)
return result
end
function memoryStream:write(value)
if not self.redirect[1] and self.closed then
-- if next is dead, ignore all writes
if coroutine.status(self.next) ~= "dead" then
io.stderr:write("attempt to use a closed stream\n")
os.exit(1)
end
elseif self.redirect[1] then
return self.redirect[1]:write(value)
elseif not self.closed then
self.buffer = self.buffer .. value
local result = table.pack(coroutine.resume(self.next))
if coroutine.status(self.next) == "dead" then
self:close()
-- always cause os.exit when the pipe closes
result[1] = nil
end
-- the next pipe
if not result[1] then
os.exit(sh.internal.command_result_as_code(result[2]))
end
return self
end
os.exit(0) -- abort the current process: SIGPIPE
end
local stream = {closed = false, buffer = "",
redirect = {}, result = {}}
local metatable = {__index = memoryStream,
__metatable = "memorystream"}
return setmetatable(stream, metatable)
end
function sh.internal.execute_complex(statements, eargs, env) function sh.internal.execute_complex(statements, eargs, env)
for si=1,#statements do local s = statements[si] for si=1,#statements do local s = statements[si]
local chains = sh.internal.groupChains(s) local chains = sh.internal.groupChains(s)

View File

@ -22,7 +22,7 @@ Many component methods have a short documentation - use `=component.componentNam
You can get a list of all attached components using the `components` program. You can get a list of all attached components using the `components` program.
If you encounter out of memory errors, throw more RAM at your computer. If you encounter out of memory errors, throw more RAM at your computer.
Have you tried turning it off and on again? Have you tried turning it off and on again?
To disable this greeting, install OpenOS to a writeable medium and delete `/etc/motd`. To disable this greeting, install OpenOS to a writeable medium and remove the `/etc/motd` line from `/etc/profile`.
Did you know OpenComputers has a forum? No? Well, it's at https://oc.cil.li/. Did you know OpenComputers has a forum? No? Well, it's at https://oc.cil.li/.
Please report bugs on the Github issue tracker, thank you! Please report bugs on the Github issue tracker, thank you!
Beware of cycles when building networks, or you may get duplicate messages! Beware of cycles when building networks, or you may get duplicate messages!

View File

@ -61,7 +61,7 @@ Hex # Discworld
Homunk # Perry Rhodan Homunk # Perry Rhodan
*Hyun-ae # Analogue: A Hate Story / Hate Plus *Hyun-ae # Analogue: A Hate Story / Hate Plus
Icarus # Deus Ex Icarus # Deus Ex
J.A.R.V.I.S # Iron Man J.A.R.V.I.S. # Iron Man
Johnny 5 # Short Circuit Johnny 5 # Short Circuit
LizzyTrickster # Contributor LizzyTrickster # Contributor
K-9 # Doctor Who K-9 # Doctor Who
@ -91,6 +91,7 @@ Shakey # The first general-purpose mobile robot that could reason ab
Skynet # Terminator Skynet # Terminator
Space Core # Portal Space Core # Portal
SpiritedDusty # Contributor SpiritedDusty # Contributor
T-800 # Terminator
T-1000 # Terminator T-1000 # Terminator
Tachikoma # Ghost in the Shell Tachikoma # Ghost in the Shell
TARA UH # Perry Rhodan TARA UH # Perry Rhodan
@ -100,7 +101,7 @@ Uniblab # The Jetsons
Unimate # First programmable robot. Unimate # First programmable robot.
Vertigo # Perry Rhodan Vertigo # Perry Rhodan
Vexatos # Contributor Vexatos # Contributor
V.I.K.I # Virtual Interactive Kinetic Intelligence - I, Robot V.I.K.I. # Virtual Interactive Kinetic Intelligence - I, Robot
Wall-E # Wall-E Wall-E # Wall-E
Watson # IBM Watson Watson # IBM Watson
Weebo # Flubber Weebo # Flubber

View File

@ -16,7 +16,7 @@ class Screen(val buffer: api.internal.TextBuffer, val hasMouse: Boolean, val has
private val bufferMargin = BufferRenderer.margin + BufferRenderer.innerMargin private val bufferMargin = BufferRenderer.margin + BufferRenderer.innerMargin
private var didDrag = false private var didClick = false
private var x, y = 0 private var x, y = 0
@ -57,13 +57,13 @@ class Screen(val buffer: api.internal.TextBuffer, val hasMouse: Boolean, val has
override protected def mouseReleased(mouseX: Int, mouseY: Int, button: Int) { override protected def mouseReleased(mouseX: Int, mouseY: Int, button: Int) {
super.mouseReleased(mouseX, mouseY, button) super.mouseReleased(mouseX, mouseY, button)
if (hasMouse && button >= 0) { if (hasMouse && button >= 0) {
if (didDrag) { if (didClick) {
toBufferCoordinates(mouseX, mouseY) match { toBufferCoordinates(mouseX, mouseY) match {
case Some((bx, by)) => buffer.mouseUp(bx, by, button, null) case Some((bx, by)) => buffer.mouseUp(bx, by, button, null)
case _ => buffer.mouseUp(-1.0, -1.0, button, null) case _ => buffer.mouseUp(-1.0, -1.0, button, null)
} }
} }
didDrag = false didClick = false
mx = -1 mx = -1
my = -1 my = -1
} }
@ -74,7 +74,7 @@ class Screen(val buffer: api.internal.TextBuffer, val hasMouse: Boolean, val has
case Some((bx, by)) if bx.toInt != mx || (by*2).toInt != my => case Some((bx, by)) if bx.toInt != mx || (by*2).toInt != my =>
if (mx >= 0 && my >= 0) buffer.mouseDrag(bx, by, button, null) if (mx >= 0 && my >= 0) buffer.mouseDrag(bx, by, button, null)
else buffer.mouseDown(bx, by, button, null) else buffer.mouseDown(bx, by, button, null)
didDrag = mx >= 0 && my >= 0 didClick = true
mx = bx.toInt mx = bx.toInt
my = (by*2).toInt // for high precision mode, sends some unnecessary packets when not using it, but eh my = (by*2).toInt // for high precision mode, sends some unnecessary packets when not using it, but eh
case _ => case _ =>

View File

@ -7,15 +7,19 @@ import li.cil.oc.util.ExtendedArguments._
import li.cil.oc.util.FluidUtils import li.cil.oc.util.FluidUtils
import li.cil.oc.util.ResultWrapper.result import li.cil.oc.util.ResultWrapper.result
import net.minecraftforge.fluids.FluidStack import net.minecraftforge.fluids.FluidStack
import net.minecraftforge.fluids.FluidTankInfo
trait TankWorldControl extends TankAware with WorldAware with SideRestricted { trait TankWorldControl extends TankAware with WorldAware with SideRestricted {
@Callback(doc = "function(side:number):boolean -- Compare the fluid in the selected tank with the fluid on the specified side. Returns true if equal.") @Callback(doc = "function(side:number [, tank:number]):boolean -- Compare the fluid in the selected tank with the fluid in the specified tank on the specified side. Returns true if equal.")
def compareFluid(context: Context, args: Arguments): Array[AnyRef] = { def compareFluid(context: Context, args: Arguments): Array[AnyRef] = {
val side = checkSideForAction(args, 0) val side = checkSideForAction(args, 0)
fluidInTank(selectedTank) match { fluidInTank(selectedTank) match {
case Some(stack) => case Some(stack) =>
FluidUtils.fluidHandlerAt(position.offset(side), side.getOpposite) match { FluidUtils.fluidHandlerAt(position.offset(side), side.getOpposite) match {
case Some(handler) => result(Option(handler.getTankInfo(side.getOpposite)).exists(_.exists(other => stack.isFluidEqual(other.fluid)))) case Some(handler) => args.optTankInfo(handler, side.getOpposite, 1, null) match {
case info: FluidTankInfo => result(stack.isFluidEqual(info.fluid))
case _ => result(Option(handler.getTankInfo(side.getOpposite)).exists(_.exists(other => stack.isFluidEqual(other.fluid))))
}
case _ => result(false) case _ => result(false)
} }
case _ => result(false) case _ => result(false)

View File

@ -5,38 +5,55 @@ import li.cil.oc.api.machine.Arguments
import li.cil.oc.api.machine.Callback import li.cil.oc.api.machine.Callback
import li.cil.oc.api.machine.Context import li.cil.oc.api.machine.Context
import li.cil.oc.server.component.result import li.cil.oc.server.component.result
import li.cil.oc.util.ExtendedArguments._
import li.cil.oc.util.FluidUtils import li.cil.oc.util.FluidUtils
import net.minecraftforge.fluids.FluidTankInfo
trait WorldTankAnalytics extends WorldAware with SideRestricted { trait WorldTankAnalytics extends WorldAware with SideRestricted {
@Callback(doc = """function(side:number):number -- Get the amount of fluid in the tank on the specified side.""") @Callback(doc = """function(side:number [, tank:number]):number -- Get the amount of fluid in the specified tank on the specified side.""")
def getTankLevel(context: Context, args: Arguments): Array[AnyRef] = { def getTankLevel(context: Context, args: Arguments): Array[AnyRef] = {
val facing = checkSideForAction(args, 0) val facing = checkSideForAction(args, 0)
FluidUtils.fluidHandlerAt(position.offset(facing), facing.getOpposite) match { FluidUtils.fluidHandlerAt(position.offset(facing), facing.getOpposite) match {
case Some(handler) => case Some(handler) => args.optTankInfo(handler, facing.getOpposite, 1, null) match {
result(handler.getTankInfo(facing.getOpposite).map(info => Option(info.fluid).fold(0)(_.amount)).sum) case info: FluidTankInfo => result(Option(info.fluid).fold(0)(_.amount))
case _ => result(handler.getTankInfo(facing.getOpposite).map(info => Option(info.fluid).fold(0)(_.amount)).sum)
}
case _ => result(Unit, "no tank") case _ => result(Unit, "no tank")
} }
} }
@Callback(doc = """function(side:number):number -- Get the capacity of the tank on the specified side.""") @Callback(doc = """function(side:number [, tank:number]):number -- Get the capacity of the specified tank on the specified side.""")
def getTankCapacity(context: Context, args: Arguments): Array[AnyRef] = { def getTankCapacity(context: Context, args: Arguments): Array[AnyRef] = {
val facing = checkSideForAction(args, 0) val facing = checkSideForAction(args, 0)
FluidUtils.fluidHandlerAt(position.offset(facing), facing.getOpposite) match { FluidUtils.fluidHandlerAt(position.offset(facing), facing.getOpposite) match {
case Some(handler) => case Some(handler) => args.optTankInfo(handler, facing.getOpposite, 1, null) match {
result(handler.getTankInfo(facing.getOpposite).map(_.capacity).foldLeft(0)((max, capacity) => math.max(max, capacity))) case info: FluidTankInfo => result(info.capacity)
case _ => result(handler.getTankInfo(facing.getOpposite).map(_.capacity).foldLeft(0)((max, capacity) => math.max(max, capacity)))
}
case _ => result(Unit, "no tank") case _ => result(Unit, "no tank")
} }
} }
@Callback(doc = """function(side:number):table -- Get a description of the fluid in the the tank on the specified side.""") @Callback(doc = """function(side:number [, tank:number]):table -- Get a description of the fluid in the the specified tank on the specified side.""")
def getFluidInTank(context: Context, args: Arguments): Array[AnyRef] = if (Settings.get.allowItemStackInspection) { def getFluidInTank(context: Context, args: Arguments): Array[AnyRef] = if (Settings.get.allowItemStackInspection) {
val facing = checkSideForAction(args, 0) val facing = checkSideForAction(args, 0)
FluidUtils.fluidHandlerAt(position.offset(facing), facing.getOpposite) match { FluidUtils.fluidHandlerAt(position.offset(facing), facing.getOpposite) match {
case Some(handler) => case Some(handler) => args.optTankInfo(handler, facing.getOpposite, 1, null) match {
result(handler.getTankInfo(facing.getOpposite)) case info: FluidTankInfo => result(info)
case _ => result(handler.getTankInfo(facing.getOpposite))
}
case _ => result(Unit, "no tank") case _ => result(Unit, "no tank")
} }
} }
else result(Unit, "not enabled in config") else result(Unit, "not enabled in config")
@Callback(doc = """function(side:number):number -- Get the number of tanks available on the specified side.""")
def getTankCount(context: Context, args: Arguments): Array[AnyRef] = {
val facing = checkSideForAction(args, 0)
FluidUtils.fluidHandlerAt(position.offset(facing), facing.getOpposite) match {
case Some(handler) => result(handler.getTankInfo(facing.getOpposite).length)
case _ => result(Unit, "no tank")
}
}
} }

View File

@ -6,6 +6,8 @@ import net.minecraft.inventory.IInventory
import net.minecraft.util.EnumFacing import net.minecraft.util.EnumFacing
import net.minecraftforge.fluids.Fluid import net.minecraftforge.fluids.Fluid
import net.minecraftforge.items.IItemHandler import net.minecraftforge.items.IItemHandler
import net.minecraftforge.fluids.FluidTankInfo
import net.minecraftforge.fluids.IFluidHandler
import scala.language.implicitConversions import scala.language.implicitConversions
@ -47,6 +49,19 @@ object ExtendedArguments {
tank tank
} }
def checkTankInfo(handler: IFluidHandler, side: EnumFacing, n: Int) = {
val tank = args.checkInteger(n) - 1
if (tank < 0 || tank >= handler.getTankInfo(side).length) {
throw new IllegalArgumentException("invalid tank index")
}
handler.getTankInfo(side)(tank)
}
def optTankInfo(handler: IFluidHandler, side: EnumFacing, n: Int, default: FluidTankInfo) = {
if (!isDefined(n)) default
else checkTankInfo(handler, side, n)
}
def checkSideAny(index: Int) = checkSide(index, EnumFacing.values: _*) def checkSideAny(index: Int) = checkSide(index, EnumFacing.values: _*)
def optSideAny(index: Int, default: EnumFacing) = def optSideAny(index: Int, default: EnumFacing) =