diff --git a/MCGalaxy/Commands/Fun/CmdCountdown.cs b/MCGalaxy/Commands/Fun/CmdCountdown.cs
index 39338eefc..dbf3a34b6 100644
--- a/MCGalaxy/Commands/Fun/CmdCountdown.cs
+++ b/MCGalaxy/Commands/Fun/CmdCountdown.cs
@@ -55,7 +55,7 @@ namespace MCGalaxy.Commands.Fun {
}
protected override void HandleSet(Player p, RoundsGame game_, string[] args) {
- if (args.Length < 4) { Help(p); return; }
+ if (args.Length < 4) { Help(p); return; }
if (game_.Running) {
p.Message("You must stop Countdown before replacing the map."); return;
}
diff --git a/MCGalaxy/MCGalaxy_.csproj b/MCGalaxy/MCGalaxy_.csproj
index d79b82ddd..b4db08009 100644
--- a/MCGalaxy/MCGalaxy_.csproj
+++ b/MCGalaxy/MCGalaxy_.csproj
@@ -627,6 +627,7 @@
+
diff --git a/MCGalaxy/Server/Backup.cs b/MCGalaxy/Server/Backup.cs
index 2ba7f1f3d..94bbd66f3 100644
--- a/MCGalaxy/Server/Backup.cs
+++ b/MCGalaxy/Server/Backup.cs
@@ -23,7 +23,7 @@ using System.IO.Packaging;
namespace MCGalaxy {
public static partial class Backup {
- const string path = "MCGalaxy.zip";
+ const string zipPath = "MCGalaxy.zip", sqlPath = "SQL.sql", dbPath = "MCGalaxy.db";
public class BackupArgs {
public Player p;
public bool Files, Database, Lite;
@@ -32,102 +32,85 @@ namespace MCGalaxy {
public static void CreatePackage(Player p, bool files, bool db, bool lite) {
if (db) {
Logger.Log(LogType.SystemActivity, "Backing up the database...");
- using (StreamWriter sql = new StreamWriter("SQL.sql"))
+ using (StreamWriter sql = new StreamWriter(sqlPath))
BackupDatabase(sql,lite);
- Logger.Log(LogType.SystemActivity, "Backed up the database to SQL.sql");
+ Logger.Log(LogType.SystemActivity, "Backed up the database to " + sqlPath);
}
- List<Uri> filesList = null;
+ List<string> filesList = null;
if (files) {
Logger.Log(LogType.SystemActivity, "Determining which files to backup...");
- string dir = Directory.GetCurrentDirectory() + "\\";
- filesList = GetAllFiles(new DirectoryInfo("./"), new Uri(dir), lite);
+ filesList = GetAllFiles(lite);
Logger.Log(LogType.SystemActivity, "Finished determining included files");
}
Logger.Log(LogType.SystemActivity, "Creating compressed backup...");
- using (ZipPackage package = (ZipPackage)ZipPackage.Open(path, FileMode.Create)) {
+ using (Stream stream = File.Create(zipPath)) {
+ ZipWriter writer = new ZipWriter(stream);
if (files) {
Logger.Log(LogType.SystemActivity, "Compressing files...");
- SaveFiles(package, filesList);
+ SaveFiles(writer, filesList);
}
- if (db) SaveDatabase(package);
+ if (db) SaveDatabase(writer);
+
+ writer.FinishEntries();
+ writer.WriteFooter();
Logger.Log(LogType.SystemActivity, "Compressed all data!");
}
p.Message("Backup of (" + (files ? "everything" + (db ? "" : " but database") : "database") + ") complete!");
Logger.Log(LogType.SystemActivity, "Server backed up!");
}
-
- const string undo1 = "extra/undo/", undo2 = @"extra\undo\";
- const string prev1 = "extra/undoPrevious/", prev2 = @"extra\undoPrevious\";
- const string levelBackup1 = "levels/backups/", levelBackup2 = @"levels\backups\";
- const string levelPrev1 = "levels/prev/", levelPrev2 = @"levels\prev\";
- const string blockDB1 = "blockdb/", blockDB2 = @"blockdb\";
- static char[] directorySeparators = new char[] { '/', '\\' };
- static List<Uri> GetAllFiles(DirectoryInfo dir, Uri baseUri, bool lite) {
- List<Uri> list = new List<Uri>();
- foreach (FileSystemInfo entry in dir.GetFileSystemInfos()) {
- if (entry is FileInfo) {
- string path = ((FileInfo)entry).FullName;
- if (lite && (path.Contains(undo1) || path.Contains(undo2))) continue;
- if (lite && (path.Contains(prev1) || path.Contains(prev2))) continue;
- if (lite && (path.Contains(levelBackup1) || path.Contains(levelBackup2))) continue;
- if (lite && (path.Contains(levelPrev1) || path.Contains(levelPrev2))) continue;
- if (lite && (path.Contains(blockDB1) || path.Contains(blockDB2))) continue;
-
- try {
- Uri uri = baseUri.MakeRelativeUri(new Uri(path));
- if (uri.ToString().IndexOfAny(directorySeparators) > 0) {
- list.Add(PackUriHelper.CreatePartUri(uri));
- }
- } catch {
- Logger.Log(LogType.Warning, "Error trying to backup file: " + path);
- throw;
- }
- } else {
- list.AddRange(GetAllFiles((DirectoryInfo)entry, baseUri, lite));
- }
- }
- return list;
- }
-
- static void SaveFiles(ZipPackage package, List<Uri> partURIs) {
- foreach (Uri loc in partURIs) {
- string file = Uri.UnescapeDataString(loc.ToString());
- if (file.Contains(path)) continue;
-
- try {
- PackagePart part = package.CreatePart(loc, "");
- using (Stream src = new FileStream("./" + file, FileMode.Open, FileAccess.Read))
- CopyStream(src, part.GetStream());
- } catch (Exception ex) {
- Logger.LogError("Failed to backup file: " + file, ex);
- }
- }
- }
-
- static void SaveDatabase(ZipPackage package) {
- Logger.Log(LogType.SystemActivity, "Compressing Database...");
- Uri uri = new Uri("/SQL.sql", UriKind.Relative);
+ static List<string> GetAllFiles(bool lite) {
+ string[] all = Directory.GetFiles("./", "*", SearchOption.AllDirectories);
+ List<string> paths = new List<string>();
- PackagePart part = package.CreatePart(uri, "", CompressionOption.Normal);
- CopyStream(File.OpenRead("SQL.sql"), part.GetStream());
- Logger.Log(LogType.SystemActivity, "Database compressed");
+ for (int i = 0; i < all.Length; i++) {
+ string path = all[i];
+ // convert to zip entry form
+ path = path.Replace('\\', '/').Replace("./", "");
+
+ if (lite && path.Contains("extra/undo/")) continue;
+ if (lite && path.Contains("extra/undoPrevious/")) continue;
+ if (lite && path.Contains("levels/prev")) continue; // NOTE(review): missing trailing '/' unlike sibling filters — also matches e.g. "levels/prevXYZ"; confirm intentional
+ if (lite && path.Contains("levels/backups/")) continue;
+ if (lite && path.Contains("blockdb/")) continue;
+
+ // zipPath, sqlPath and dbPath all live in the server's root folder,
+ // so the root-folder check below already excludes them from the
+ // backup — explicit per-file filters are unnecessary here
+ // ignore files in root folder
+ if (path.IndexOf('/') == -1) continue;
+ paths.Add(path);
+ }
+ return paths;
}
-
- static void CopyStream(Stream source, Stream target) {
- const int bufSize = 0x1000;
- byte[] buf = new byte[bufSize];
- int bytesRead = 0;
- while ((bytesRead = source.Read(buf, 0, bufSize)) > 0)
- target.Write(buf, 0, bytesRead);
+
+ static void SaveFiles(ZipWriter writer, List<string> paths) {
+ foreach (string path in paths) {
+ try {
+ using (Stream src = File.OpenRead(path)) {
+ writer.WriteEntry(src, path);
+ }
+ } catch (Exception ex) {
+ Logger.LogError("Failed to backup file: " + path, ex);
+ }
+ }
+ }
+
+ static void SaveDatabase(ZipWriter writer) {
+ Logger.Log(LogType.SystemActivity, "Compressing Database...");
+ // TODO: gzip compress
+ using (FileStream fs = File.OpenRead(sqlPath)) {
+ writer.WriteEntry(fs, sqlPath);
+ }
+ Logger.Log(LogType.SystemActivity, "Database compressed");
}
public static void ExtractPackage(Player p) {
int errors = 0;
- using (FileStream src = File.OpenRead(path))
+ using (FileStream src = File.OpenRead(zipPath))
using (ZipPackage zip = (ZipPackage)ZipPackage.Open(src))
{
PackagePartCollection parts = zip.GetParts();
@@ -146,6 +129,14 @@ namespace MCGalaxy {
p.Message("It is recommended that you restart the server, although this is not required.");
}
+ static void CopyStream(Stream source, Stream target) {
+ const int bufSize = 0x1000;
+ byte[] buf = new byte[bufSize];
+ int bytesRead = 0;
+ while ((bytesRead = source.Read(buf, 0, bufSize)) > 0)
+ target.Write(buf, 0, bytesRead);
+ }
+
static void ExtractItem(ZipPackagePart item, ref int errors) {
string entry = item.Uri.ToString();
string file = "./" + Uri.UnescapeDataString(entry);
diff --git a/MCGalaxy/Server/ZipWriter.cs b/MCGalaxy/Server/ZipWriter.cs
new file mode 100644
index 000000000..0b2670694
--- /dev/null
+++ b/MCGalaxy/Server/ZipWriter.cs
@@ -0,0 +1,283 @@
+/*
+ Copyright 2015 MCGalaxy
+
+ Dual-licensed under the Educational Community License, Version 2.0 and
+ the GNU General Public License, Version 3 (the "Licenses"); you may
+ not use this file except in compliance with the Licenses. You may
+ obtain a copy of the Licenses at
+
+ http://www.opensource.org/licenses/ecl2.php
+ http://www.gnu.org/licenses/gpl-3.0.html
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the Licenses are distributed on an "AS IS"
+ BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ or implied. See the Licenses for the specific language governing
+ permissions and limitations under the Licenses.
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+
+namespace MCGalaxy {
+
+ struct ZipEntry {
+ public string Filename;
+ public long CompressedSize, UncompressedSize, LocalHeaderOffset;
+ public uint Crc32;
+
+ public void Reset() {
+ // signify to use zip64 version of these fields instead
+ CompressedSize = uint.MaxValue;
+ UncompressedSize = uint.MaxValue;
+ LocalHeaderOffset = uint.MaxValue;
+ }
+ }
+
+ sealed class ZipEntryStream : Stream {
+ public uint Crc32;
+ public long CompressedLen;
+ Stream stream;
+
+ public ZipEntryStream(Stream stream) {
+ this.stream = stream;
+ Crc32 = uint.MaxValue;
+ }
+
+ public override bool CanRead { get { return false; } }
+ public override bool CanSeek { get { return false; } }
+ public override bool CanWrite { get { return true; } }
+
+ static Exception ex = new NotSupportedException("Stream does not support length/seeking.");
+ public override void Flush() { stream.Flush(); }
+ public override long Length { get { throw ex; } }
+ public override long Position { get { throw ex; } set { throw ex; } }
+ public override int Read(byte[] buffer, int offset, int count) { throw ex; }
+ public override long Seek(long offset, SeekOrigin origin) { throw ex; }
+ public override void SetLength(long length) { throw ex; }
+
+ public override void Write(byte[] buffer, int offset, int count) {
+ stream.Write(buffer, offset, count);
+ CompressedLen += count;
+
+ for (int i = offset; i < offset + count; i++) {
+ Crc32 = crc32Table[(Crc32 ^ buffer[i]) & 0xFF] ^ (Crc32 >> 8);
+ }
+ }
+
+ public override void WriteByte(byte value) {
+ stream.WriteByte(value);
+ CompressedLen++;
+ Crc32 = crc32Table[(Crc32 ^ value) & 0xFF] ^ (Crc32 >> 8);
+ }
+
+ public override void Close() {
+ stream = null;
+ Crc32 ^= uint.MaxValue;
+ }
+
+ static uint[] crc32Table;
+ static ZipEntryStream() {
+ crc32Table = new uint[256];
+ for (int i = 0; i < crc32Table.Length; i++) {
+ uint c = (uint)i;
+
+ for (int j = 0; j < 8; j++ ) {
+ if ((c & 1) != 0) {
+ c = 0xEDB88320 ^ (c >> 1);
+ } else { c >>= 1; }
+ }
+ crc32Table[i] = c;
+ }
+ }
+ }
+
+ public sealed class ZipWriter {
+ BinaryWriter w;
+ Stream stream;
+ byte[] buffer = new byte[81920];
+
+ DateTime now = DateTime.Now;
+ bool zip64;
+ List<ZipEntry> entries = new List<ZipEntry>();
+
+ int numEntries;
+ long centralDirOffset, centralDirSize, zip64EndOffset;
+ const ushort ver_norm = 20, ver_zip64 = 45, zip64Extra = 28;
+
+ public ZipWriter(Stream stream) {
+ this.stream = stream;
+ w = new BinaryWriter(stream);
+ }
+
+ public void WriteEntry(Stream src, string file) {
+ ZipEntry entry = default(ZipEntry);
+ entry.Filename = file;
+ entry.LocalHeaderOffset = stream.Position;
+
+ // leave some room to fill in header later
+ int headerSize = 30 + file.Length + zip64Extra;
+ stream.Write(buffer, 0, headerSize);
+
+ ZipEntryStream dst;
+ using (dst = new ZipEntryStream(stream)) {
+ int read = 0;
+
+ while ((read = src.Read(buffer, 0, buffer.Length)) > 0) {
+ dst.Write(buffer, 0, read);
+ entry.UncompressedSize += read;
+ }
+ }
+
+ entry.CompressedSize = dst.CompressedLen;
+ entry.Crc32 = dst.Crc32;
+ entries.Add(entry); numEntries++;
+ }
+
+ public void FinishEntries() {
+ // account for central directory too
+ const int maxLen = int.MaxValue - 4 * 1000 * 1000;
+ zip64 = numEntries >= ushort.MaxValue || stream.Length >= maxLen;
+ long pos = stream.Position;
+
+ for (int i = 0; i < numEntries; i++) {
+ // zip64 turned out to be unneeded, so skip over the reserved (unused) zip64 extra field space
+ ZipEntry entry = entries[i];
+ if (!zip64) entry.LocalHeaderOffset += zip64Extra;
+
+ stream.Seek(entry.LocalHeaderOffset, SeekOrigin.Begin);
+ WriteLocalFileRecord(entry);
+ entries[i] = entry;
+ }
+
+ stream.Seek(pos, SeekOrigin.Begin);
+ }
+
+ public void WriteFooter() {
+ centralDirOffset = stream.Position;
+ for (int i = 0; i < numEntries; i++) {
+ WriteCentralDirectoryRecord(entries[i]);
+ }
+ centralDirSize = stream.Position - centralDirOffset;
+
+ if (zip64) WriteZip64EndOfCentralDirectory();
+ WriteEndOfCentralDirectoryRecord();
+ }
+
+ void WriteZip64EndOfCentralDirectory() {
+ zip64EndOffset = stream.Position;
+ WriteZip64EndOfCentralDirectoryRecord();
+ WriteZip64EndOfCentralDirectoryLocator();
+
+ // signify to use zip64 record to find data
+ numEntries = ushort.MaxValue;
+ centralDirOffset = uint.MaxValue;
+ centralDirSize = uint.MaxValue;
+ }
+
+
+ void WriteLocalFileRecord(ZipEntry entry) {
+ ushort extraLen = (ushort)(zip64 ? zip64Extra : 0); // NOTE(review): declares 28 extra bytes, but WriteZip64ExtraField(copy, false) writes only 20 — trailing 8 bytes are uninitialized placeholder data; confirm readers tolerate this
+ ushort version = zip64 ? ver_zip64 : ver_norm;
+ ZipEntry copy = entry;
+ if (zip64) entry.Reset();
+
+ w.Write(0x04034b50);
+ w.Write(version);
+ w.Write((ushort)0); // bitflags
+ w.Write((ushort)0); // compression method
+ WriteLastModified();
+ w.Write(entry.Crc32);
+ w.Write((int)entry.CompressedSize);
+ w.Write((int)entry.UncompressedSize);
+ w.Write((ushort)entry.Filename.Length);
+ w.Write(extraLen);
+
+ WriteString(entry.Filename);
+ if (zip64) WriteZip64ExtraField(copy, false);
+ }
+
+ void WriteCentralDirectoryRecord(ZipEntry entry) {
+ ushort extraLen = (ushort)(zip64 ? zip64Extra : 0);
+ ushort version = zip64 ? ver_zip64 : ver_norm;
+ ZipEntry copy = entry;
+ if (zip64) entry.Reset();
+
+ w.Write(0x02014b50); // signature
+ w.Write(version);
+ w.Write(version);
+ w.Write((ushort)0); // bitflags
+ w.Write((ushort)0); // compression method
+ WriteLastModified();
+ w.Write(entry.Crc32);
+ w.Write((int)entry.CompressedSize);
+ w.Write((int)entry.UncompressedSize);
+ w.Write((ushort)entry.Filename.Length);
+ w.Write(extraLen);
+ w.Write((ushort)0); // file comment length
+ w.Write((ushort)0); // disk number
+ w.Write((ushort)0); // internal attributes
+ w.Write(0); // external attributes
+ w.Write((int)entry.LocalHeaderOffset);
+
+ WriteString(entry.Filename);
+ if (zip64) WriteZip64ExtraField(copy, true);
+ }
+
+ void WriteString(string str) {
+ for (int i = 0; i < str.Length; i++) {
+ w.Write((byte)str[i].UnicodeToCp437());
+ }
+ }
+
+ void WriteZip64ExtraField(ZipEntry entry, bool offset) {
+ int len = zip64Extra - 4; // ignore header size
+ if (!offset) len -= 8;
+
+ w.Write((ushort)1); // mapping id
+ w.Write((ushort)len);
+ w.Write(entry.UncompressedSize);
+ w.Write(entry.CompressedSize);
+ if (offset) w.Write(entry.LocalHeaderOffset);
+ }
+
+ void WriteLastModified() {
+ int modTime = (now.Second / 2) | (now.Minute << 5) | (now.Hour << 11);
+ int modDate = (now.Day) | (now.Month << 5) | ((now.Year - 1980) << 9);
+ w.Write((ushort)modTime);
+ w.Write((ushort)modDate);
+ }
+
+ void WriteZip64EndOfCentralDirectoryRecord() {
+ w.Write((uint)0x06064b50);
+ const long zip64EndDataSize = (2 * 2) + (2 * 4) + (4 * 8);
+ w.Write(zip64EndDataSize);
+ w.Write(ver_zip64);
+ w.Write(ver_zip64);
+ w.Write(0); // disk number
+ w.Write(0); // disk number of central directory
+ w.Write((long)numEntries);
+ w.Write((long)numEntries);
+ w.Write(centralDirSize);
+ w.Write(centralDirOffset);
+ }
+
+ void WriteZip64EndOfCentralDirectoryLocator() {
+ w.Write((uint)0x07064b50);
+ w.Write(0); // disk number of zip64 end of central directory
+ w.Write(zip64EndOffset);
+ w.Write(1); // total number of disks
+ }
+
+ void WriteEndOfCentralDirectoryRecord() {
+ w.Write(0x06054b50);
+ w.Write((ushort)0); // disk number
+ w.Write((ushort)0); // disk number of start
+ w.Write((ushort)numEntries);
+ w.Write((ushort)numEntries);
+ w.Write((uint)centralDirSize);
+ w.Write((uint)centralDirOffset);
+ w.Write((ushort)0); // comment length
+ }
+ }
+}