Mirror of https://github.com/MightyPirates/OpenComputers.git, synced 2025-09-15 02:12:42 -04:00
changed power model to an overflow-based one instead of averaging all buffers (buffers across the network will be emptied / filled one after the other, regardless of where the power delta came from)
commit e68aab67ba
parent 0eb6ef41de
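The model described in the commit message can be illustrated with a small, self-contained sketch. This is not the mod's code (class and method names here are invented); it only shows the overflow idea: a power delta is applied to the network's buffers one after the other, and the call reports failure if demand could not be covered or surplus was lost.

// Standalone sketch of the overflow-based model (hypothetical names).
object OverflowModelSketch {
  final class Buffer(val size: Double, var stored: Double = 0.0)

  // Applies `delta` across `buffers` sequentially; returns true only if nothing was lost.
  def applyDelta(buffers: Seq[Buffer], delta: Double): Boolean = {
    var remaining = delta
    if (remaining < 0) {
      // Drain buffers one after the other until the demand is covered.
      for (buffer <- buffers if remaining < 0) {
        val drained = math.min(buffer.stored, -remaining)
        buffer.stored -= drained
        remaining += drained
      }
    } else {
      // Fill buffers one after the other until the surplus is stored.
      for (buffer <- buffers if remaining > 0) {
        val space = buffer.size - buffer.stored
        val filled = math.min(space, remaining)
        buffer.stored += filled
        remaining -= filled
      }
    }
    remaining == 0 // false => not enough power, or the surplus overflowed
  }

  def main(args: Array[String]): Unit = {
    val network = Seq(new Buffer(100), new Buffer(50))
    println(applyDelta(network, 120))  // true: 100 into the first buffer, 20 into the second
    println(applyDelta(network, -150)) // false: only 120 stored, the rest is missing
  }
}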
@@ -26,17 +26,27 @@ package li.cil.oc.api.network;
  */
 public interface Connector extends Node {
     /**
-     * The size of the buffer.
+     * The power stored in the local buffer.
      */
-    double bufferSize();
+    double localBuffer();
 
     /**
-     * The power stored in the buffer.
+     * The size of the local buffer.
      */
-    double buffer();
+    double localBufferSize();
+
+    /**
+     * The accumulative power stored across all buffers in the node's network.
+     */
+    double globalBuffer();
+
+    /**
+     * The accumulative size of all buffers in the node's network.
+     */
+    double globalBufferSize();
 
     /**
-     * Try to apply the specified delta to the buffer.
+     * Try to apply the specified delta to the <em>global</em> buffer.
      * <p/>
      * This can be used to apply reactionary power changes. For example, a
      * screen may require a certain amount of power to refresh its display when
@@ -45,12 +55,17 @@ public interface Connector extends Node {
      * <p/>
      * For negative values, if there is not enough power stored in the buffer
      * this will return <tt>false</tt>, and the operation depending on the power
-     * should fail.
+     * should fail - what power there is will still be consumed, though!
      * <p/>
      * For positive values, if there is a buffer overflow due to the added power
      * the surplus will be lost and this will return <tt>false</tt>.
      * <p/>
      * If there is enough power or no overflow this will return <tt>true</tt>.
+     * <p/>
+     * Keep in mind that this change is applied to the <em>global</em> buffer,
+     * i.e. power from multiple buffers may be consumed / multiple buffers may
+     * be filled. The buffer for which this method is called (i.e. this node
+     * instance) will be prioritized, though.
      *
      * @param delta the amount of power to consume or make available.
      * @return whether the power could be consumed or stored.
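A hedged usage sketch of the Connector API documented above, in Scala. The cost value and helper names are invented for illustration, and the boolean changeBuffer(double) signature is assumed from the comment's @param/@return description; only the methods shown in this diff are used.

import li.cil.oc.api.network.Connector

object ConnectorUsageSketch {
  // Negative delta: drain the *global* buffer, this node's local buffer first.
  // Returns false if the network could not cover the full cost - whatever power
  // was available has still been consumed, so the caller should fail gracefully.
  def tryConsume(connector: Connector, cost: Double): Boolean =
    connector.changeBuffer(-cost)

  // Reads the local and network-wide buffer state exposed by the interface.
  def report(connector: Connector): String =
    "%.2f/%.2f locally, %.2f/%.2f network-wide".format(
      connector.localBuffer, connector.localBufferSize,
      connector.globalBuffer, connector.globalBufferSize)
}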
@@ -28,8 +28,6 @@ class PowerDistributor(val parent: SimpleDelegator) extends SimpleDelegate {

  override def getLightValue(world: IBlockAccess, x: Int, y: Int, z: Int) = 5

  // ----------------------------------------------------------------------- //
  // Tile entity
  // ----------------------------------------------------------------------- //

  override def hasTileEntity = true
@@ -31,8 +31,8 @@ class Analyzer(val parent: Delegator) extends Delegate {
 
   private def analyzeNode(environment: Environment, player: EntityPlayer) = if (environment != null) {
     environment.node match {
-      case connector: Connector =>
-        player.addChatMessage("Power: %.2f/%.2f".format(connector.buffer, connector.bufferSize))
+      case connector: Connector if connector.localBufferSize > 0 =>
+        player.addChatMessage("Stored power: %.2f/%.2f".format(connector.localBuffer, connector.localBufferSize))
       case _ =>
     }
     environment.node match {
@@ -19,7 +19,7 @@ class PowerConverter extends Environment with IEnergySink with IPowerReceptor wi
     withConnector(Config.bufferConverter).
     create()
 
-  private def demand = node.bufferSize - node.buffer
+  private def demand = node.localBufferSize - node.localBuffer
 
   // ----------------------------------------------------------------------- //
   // Energy conversion ratios, Mode -> Internal
@@ -108,7 +108,7 @@ class PowerConverter extends Environment with IEnergySink with IPowerReceptor wi
     // We try to avoid requesting energy when we need less than what we get with
     // a single packet. However, if our buffer gets dangerously low we will ask
     // for energy even if there's the danger of wasting some energy.
-    if (demand >= lastPacketSize * ratioIndustrialCraft || demand > node.bufferSize * 0.5) {
+    if (demand >= lastPacketSize * ratioIndustrialCraft || demand > node.localBufferSize * 0.5) {
       demand
     } else 0
   }
@@ -130,7 +130,7 @@ class PowerConverter extends Environment with IEnergySink with IPowerReceptor wi
     if (node != null && powerHandler.isEmpty) {
       val handler = new PowerHandler(this, PowerHandler.Type.STORAGE)
       if (handler != null) {
-        handler.configure(1, 320, Float.MaxValue, node.bufferSize.toFloat / ratioBuildCraft)
+        handler.configure(1, 320, Float.MaxValue, node.localBufferSize.toFloat / ratioBuildCraft)
         handler.configurePowerPerdition(0, 0)
         powerHandler = Some(handler)
       }
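The request heuristic described in the comment above, as a standalone sketch. Only the shape mirrors the diff; the function and parameter names are invented, and nothing else here is from the mod.

object DemandSketch {
  def energyToRequest(localBuffer: Double, localBufferSize: Double,
                      lastPacketSize: Double, ratioIndustrialCraft: Double): Double = {
    val demand = localBufferSize - localBuffer
    // Ask for energy only if a full packet fits, or if the buffer has dropped below half full.
    if (demand >= lastPacketSize * ratioIndustrialCraft || demand > localBufferSize * 0.5) demand
    else 0
  }
}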
@@ -5,26 +5,81 @@ import li.cil.oc.api.network._
 import li.cil.oc.client.{PacketSender => ClientPacketSender}
 import li.cil.oc.server.network.Connector
 import li.cil.oc.server.{PacketSender => ServerPacketSender}
+import net.minecraft.entity.player.EntityPlayer
 import scala.collection.convert.WrapAsScala._
 import scala.collection.mutable
 
-class PowerDistributor extends Environment {
+class PowerDistributor extends Environment with Analyzable {
   val node = api.Network.newNode(this, Visibility.Network).create()
 
-  val connectors = mutable.Set.empty[Connector]
+  var globalBuffer = 0.0
+
+  var globalBufferSize = 0.0
 
   var average = 0.0
 
   private var lastSentAverage = 0.0
 
+  private val buffers = mutable.Set.empty[Connector]
+
   private val distributors = mutable.Set.empty[PowerDistributor]
 
+  private var dirty = true
+
   // ----------------------------------------------------------------------- //
 
+  def onAnalyze(player: EntityPlayer, side: Int, hitX: Float, hitY: Float, hitZ: Float) = {
+    player.addChatMessage("Global power: %.2f/%.2f".format(globalBuffer, globalBufferSize))
+    this
+  }
+
+  // ----------------------------------------------------------------------- //
+
+  def changeBuffer(delta: Double): Boolean = {
+    if (delta != 0) {
+      val oldBuffer = globalBuffer
+      globalBuffer = (globalBuffer + delta) max 0 min globalBufferSize
+      if (globalBuffer != oldBuffer) {
+        dirty = true
+        if (delta < 0) {
+          var remaining = -delta
+          for (connector <- buffers if connector.localBuffer > 0) {
+            if (connector.localBuffer < remaining) {
+              remaining -= connector.localBuffer
+              connector.localBuffer = 0
+            }
+            else {
+              connector.changeBuffer(-remaining)
+              return true
+            }
+          }
+        }
+        else if (delta > 0) {
+          var remaining = delta
+          for (connector <- buffers if connector.localBuffer < connector.localBufferSize) {
+            val space = connector.localBufferSize - connector.localBuffer
+            if (space < remaining) {
+              remaining -= space
+              connector.localBuffer = connector.localBufferSize
+            }
+            else {
+              connector.changeBuffer(remaining)
+              return true
+            }
+          }
+        }
+      }
+    }
+    false
+  }
+
+  // ----------------------------------------------------------------------- //
+
   override def updateEntity() {
     super.updateEntity()
-    if (!worldObj.isRemote && connectors.exists(_.dirty))
-      balance()
+    if (!worldObj.isRemote && (dirty || buffers.exists(_.dirty))) {
+      updateCachedValues()
+    }
   }
 
   override def validate() {
@@ -36,38 +91,27 @@ class PowerDistributor extends Environment {
 
   // ----------------------------------------------------------------------- //
 
-  override def onDisconnect(node: Node) {
-    super.onDisconnect(node)
-    if (node == this.node) {
-      connectors.clear()
-      distributors.clear()
-      average = -1
-    }
-    else node match {
-      case connector: Connector =>
-        connectors -= connector
-        balance()
-      case _ => node.host match {
-        case distributor: PowerDistributor => distributors -= distributor
-        case _ =>
-      }
-    }
-  }
-
   override def onConnect(node: Node) {
     super.onConnect(node)
     if (node == this.node) {
       for (node <- node.network.nodes) node match {
-        case connector: Connector => connectors += connector
+        case connector: Connector if connector.localBufferSize > 0 =>
+          buffers += connector
+          globalBuffer += connector.localBuffer
+          globalBufferSize += connector.localBufferSize
         case _ => node.host match {
          case distributor: PowerDistributor => distributors += distributor
          case _ =>
        }
      }
-      balance()
+      dirty = true
     }
     else node match {
-      case connector: Connector => connectors += connector
+      case connector: Connector =>
+        buffers += connector
+        globalBuffer += connector.localBuffer
+        globalBufferSize += connector.localBufferSize
+        dirty = true
      case _ => node.host match {
        case distributor: PowerDistributor => distributors += distributor
        case _ =>
@@ -75,28 +119,48 @@ class PowerDistributor extends Environment {
      }
    }
 
+  override def onDisconnect(node: Node) {
+    super.onDisconnect(node)
+    if (node == this.node) {
+      buffers.clear()
+      distributors.clear()
+      globalBuffer = 0
+      globalBufferSize = 0
+      average = -1
+    }
+    else node match {
+      case connector: Connector =>
+        buffers -= connector
+        globalBuffer -= connector.localBuffer
+        globalBufferSize -= connector.localBufferSize
+        dirty = true
+      case _ => node.host match {
+        case distributor: PowerDistributor => distributors -= distributor
+        case _ =>
+      }
+    }
+  }
 
   // ----------------------------------------------------------------------- //
 
-  private def balance() {
+  def updateCachedValues() {
    // Computer average fill ratio of all buffers.
-    val (minRelativeBuffer, maxRelativeBuffer, sumBuffer, sumBufferSize) =
-      connectors.foldRight((1.0, 0.0, 0.0, 0.0))((c, acc) => {
+    val (sumBuffer, sumBufferSize) =
+      buffers.foldRight((0.0, 0.0))((c, acc) => {
        c.dirty = false // clear dirty flag for all connectors
-        (acc._1 min (c.buffer / c.bufferSize), acc._2 max (c.buffer / c.bufferSize),
-          acc._3 + c.buffer, acc._4 + c.bufferSize)
+        (acc._1 + c.localBuffer, acc._2 + c.localBufferSize)
      })
-    average = if (sumBufferSize > 0) sumBuffer / sumBufferSize else 0
-    if ((lastSentAverage - average).abs > 0.05) {
-      lastSentAverage = average
-      for (distributor <- distributors) {
-        distributor.average = average
+    average = if (globalBufferSize > 0) globalBuffer / globalBufferSize else 0
+    val shouldSend = (lastSentAverage - average).abs > 0.05
+    for (distributor <- distributors) {
+      distributor.dirty = false
+      distributor.globalBuffer = sumBuffer
+      distributor.globalBufferSize = sumBufferSize
+      distributor.average = average
+      if (shouldSend) {
+        distributor.lastSentAverage = lastSentAverage
        ServerPacketSender.sendPowerState(distributor)
      }
    }
-    if (maxRelativeBuffer - minRelativeBuffer > 10e-4) {
-      // Adjust buffer fill ratio for all buffers to average.
-      connectors.foreach(c => c.buffer = c.bufferSize * average)
-    }
  }
 }
@@ -14,9 +14,9 @@ class PowerSupply extends ManagedComponent {
     node.changeBuffer(1)
   }
 
-  @LuaCallback(value = "bufferSize", direct = true)
-  def bufferSize(context: Context, args: Arguments): Array[AnyRef] = result(node.bufferSize)
+  @LuaCallback(value = "localBufferSize", direct = true)
+  def bufferSize(context: Context, args: Arguments): Array[AnyRef] = result(node.localBufferSize)
 
-  @LuaCallback(value = "buffer", direct = true)
-  def buffer(context: Context, args: Arguments): Array[AnyRef] = result(node.buffer)
+  @LuaCallback(value = "localBuffer", direct = true)
+  def buffer(context: Context, args: Arguments): Array[AnyRef] = result(node.localBuffer)
 }
@@ -2,40 +2,77 @@ package li.cil.oc.server.network
 
 import li.cil.oc.Config
 import li.cil.oc.api.network
+import li.cil.oc.api.network.{Node => ImmutableNode}
+import li.cil.oc.common.tileentity.PowerDistributor
 import li.cil.oc.util.Persistable
 import net.minecraft.nbt.NBTTagCompound
+import scala.collection.convert.WrapAsScala._
 
-trait Connector extends network.Connector with Persistable {
-  val bufferSize: Double
+trait Connector extends Node with network.Connector with Persistable {
+  val localBufferSize: Double
 
+  var localBuffer = 0.0
+
   var dirty = true
 
-  var buffer = 0.0
+  private var distributor: Option[PowerDistributor] = None
 
+  // ----------------------------------------------------------------------- //
+
+  def globalBuffer = distributor.fold(localBuffer)(_.globalBuffer)
+
+  def globalBufferSize = distributor.fold(localBufferSize)(_.globalBufferSize)
+
+  // ----------------------------------------------------------------------- //
+
   def changeBuffer(delta: Double) = if (delta != 0) {
-    val oldBuffer = buffer
-    buffer = buffer + delta
-    val ok = if (buffer < 0) {
-      buffer = 0
-      false
+    val oldBuffer = localBuffer
+    localBuffer = localBuffer + delta
+    val ok = if (localBuffer < 0) {
+      val remaining = localBuffer
+      localBuffer = 0
+      distributor.fold(false)(_.changeBuffer(remaining))
    }
-    else if (buffer > bufferSize) {
-      buffer = bufferSize
-      false
+    else if (localBuffer > localBufferSize) {
+      val remaining = localBuffer - localBufferSize
+      localBuffer = localBufferSize
+      distributor.fold(false)(_.changeBuffer(remaining))
    }
    else true
-    if (buffer != oldBuffer) dirty = true
+    dirty ||= (localBuffer != oldBuffer)
    ok || Config.ignorePower
  } else true
 
+  // ----------------------------------------------------------------------- //
+
+  override def onConnect(node: ImmutableNode) {
+    if (node == this) findDistributor()
+    else if (distributor.isEmpty) node.host match {
+      case distributor: PowerDistributor => this.distributor = Some(distributor)
+      case _ =>
+    }
+    super.onConnect(node)
+  }
+
+  override def onDisconnect(node: ImmutableNode) {
+    if (node != this && distributor.exists(_ == node.host)) findDistributor()
+    super.onDisconnect(node)
+  }
+
+  private def findDistributor() {
+    distributor = reachableNodes.find(_.host.isInstanceOf[PowerDistributor]).fold(None: Option[PowerDistributor])(n => Some(n.host.asInstanceOf[PowerDistributor]))
+  }
+
+  // ----------------------------------------------------------------------- //
+
   override def load(nbt: NBTTagCompound) {
     super.load(nbt)
-    buffer = nbt.getDouble(Config.namespace + "connector.buffer") max 0 min bufferSize
+    localBuffer = nbt.getDouble(Config.namespace + "connector.buffer") max 0 min localBufferSize
     dirty = true
   }
 
   override def save(nbt: NBTTagCompound) {
     super.save(nbt)
-    nbt.setDouble(Config.namespace + "connector.buffer", buffer)
+    nbt.setDouble(Config.namespace + "connector.buffer", localBuffer)
   }
 }
@@ -16,7 +16,7 @@ class Network private(private val data: mutable.Map[String, Network.Vertex] = mu
   def this(node: MutableNode) = {
     this()
     addNew(node)
-    node.host.onConnect(node)
+    node.onConnect(node)
   }
 
   private lazy val wrapper = new Network.Wrapper(this)
@@ -48,9 +48,9 @@ class Network private(private val data: mutable.Map[String, Network.Vertex] = mu
       assert(!oldNodeB.edges.exists(_.isBetween(oldNodeA, oldNodeB)))
       Network.Edge(oldNodeA, oldNodeB)
       if (oldNodeA.data.reachability == Visibility.Neighbors)
-        oldNodeB.data.host.onConnect(oldNodeA.data)
+        oldNodeB.data.onConnect(oldNodeA.data)
       if (oldNodeB.data.reachability == Visibility.Neighbors)
-        oldNodeA.data.host.onConnect(oldNodeB.data)
+        oldNodeA.data.onConnect(oldNodeB.data)
       true
     }
     else false // That connection already exists.
@@ -76,9 +76,9 @@ class Network private(private val data: mutable.Map[String, Network.Vertex] = mu
       case Some(edge) => {
        handleSplit(edge.remove())
        if (edge.left.data.reachability == Visibility.Neighbors)
-          edge.right.data.host.onDisconnect(edge.left.data)
+          edge.right.data.onDisconnect(edge.left.data)
        if (edge.right.data.reachability == Visibility.Neighbors)
-          edge.left.data.host.onDisconnect(edge.right.data)
+          edge.left.data.onDisconnect(edge.right.data)
        true
      }
      case _ => false // That connection doesn't exists.
@@ -96,7 +96,7 @@ class Network private(private val data: mutable.Map[String, Network.Vertex] = mu
        case Visibility.Network => subGraphs.map(_.values.map(_.data)).flatten
      })
      handleSplit(subGraphs)
-      targets.foreach(_.host.onDisconnect(node))
+      targets.foreach(_.asInstanceOf[MutableNode].onDisconnect(node))
      true
    }
    case _ => false
@@ -209,7 +209,7 @@ class Network private(private val data: mutable.Map[String, Network.Vertex] = mu
      Network.Edge(oldNode, node(addedNode))
    }
 
-    for ((node, nodes) <- connects) nodes.foreach(_.host.onConnect(node))
+    for ((node, nodes) <- connects) nodes.foreach(_.asInstanceOf[MutableNode].onConnect(node))
 
    true
  }
@@ -229,8 +229,8 @@ class Network private(private val data: mutable.Map[String, Network.Vertex] = mu
      for (indexB <- (indexA + 1) until subGraphs.length) {
        val nodesB = nodes(indexB)
        val visibleNodesB = visibleNodes(indexB)
-        visibleNodesA.foreach(node => nodesB.foreach(_.host.onDisconnect(node)))
-        visibleNodesB.foreach(node => nodesA.foreach(_.host.onDisconnect(node)))
+        visibleNodesA.foreach(node => nodesB.foreach(_.onDisconnect(node)))
+        visibleNodesB.foreach(node => nodesA.foreach(_.onDisconnect(node)))
      }
    }
  }
@@ -307,7 +307,7 @@ object Network extends api.detail.NetworkAPI {
    def create() = if (FMLCommonHandler.instance.getEffectiveSide == Side.SERVER) new MutableNode with Connector {
      val host = _host
      val reachability = _reachability
-      val bufferSize = _bufferSize
+      val localBufferSize = _bufferSize
    }
    else null
  }
@@ -317,7 +317,7 @@ object Network extends api.detail.NetworkAPI {
      val host = _host
      val reachability = _reachability
      val name = _name
-      val bufferSize = _bufferSize
+      val localBufferSize = _bufferSize
      setVisibility(_visibility)
    }
    else null
@@ -53,13 +53,25 @@ trait Node extends api.network.Node with Persistable {
 
+  // ----------------------------------------------------------------------- //
+
+  def onConnect(node: ImmutableNode) {
+    host.onConnect(node)
+  }
+
+  def onDisconnect(node: ImmutableNode) {
+    host.onDisconnect(node)
+  }
+
+  // ----------------------------------------------------------------------- //
+
   override def load(nbt: NBTTagCompound) = {
     super.load(nbt)
     if (nbt.hasKey(Config.namespace + "node.address")) {
       address = nbt.getString(Config.namespace + "node.address")
     }
   }
 
   override def save(nbt: NBTTagCompound) = {
     super.save(nbt)
     if (address != null) {
       nbt.setString(Config.namespace + "node.address", address)
     }