Diffstat (limited to 'lib/backupstore')
-rw-r--r--  lib/backupstore/BackupClientFileAttributes.cpp | 1223
-rw-r--r--  lib/backupstore/BackupClientFileAttributes.h | 82
-rw-r--r--  lib/backupstore/BackupCommands.cpp | 970
-rw-r--r--  lib/backupstore/BackupConstants.h | 21
-rw-r--r--  lib/backupstore/BackupStoreAccountDatabase.cpp | 8
-rw-r--r--  lib/backupstore/BackupStoreAccountDatabase.h | 8
-rw-r--r--  lib/backupstore/BackupStoreAccounts.cpp | 56
-rw-r--r--  lib/backupstore/BackupStoreAccounts.h | 13
-rw-r--r--  lib/backupstore/BackupStoreCheck.cpp | 482
-rw-r--r--  lib/backupstore/BackupStoreCheck.h | 53
-rw-r--r--  lib/backupstore/BackupStoreCheck2.cpp | 145
-rw-r--r--  lib/backupstore/BackupStoreCheckData.cpp | 12
-rw-r--r--  lib/backupstore/BackupStoreConfigVerify.cpp | 4
-rw-r--r--  lib/backupstore/BackupStoreConstants.h | 44
-rw-r--r--  lib/backupstore/BackupStoreContext.cpp | 1808
-rw-r--r--  lib/backupstore/BackupStoreContext.h | 211
-rw-r--r--  lib/backupstore/BackupStoreDirectory.cpp | 578
-rw-r--r--  lib/backupstore/BackupStoreDirectory.h | 289
-rw-r--r--  lib/backupstore/BackupStoreException.h | 17
-rw-r--r--  lib/backupstore/BackupStoreException.txt | 72
-rw-r--r--  lib/backupstore/BackupStoreFile.cpp | 1558
-rw-r--r--  lib/backupstore/BackupStoreFile.h | 234
-rw-r--r--  lib/backupstore/BackupStoreFileCmbDiff.cpp | 326
-rw-r--r--  lib/backupstore/BackupStoreFileCmbIdx.cpp | 324
-rw-r--r--  lib/backupstore/BackupStoreFileCombine.cpp | 410
-rw-r--r--  lib/backupstore/BackupStoreFileCryptVar.cpp | 31
-rw-r--r--  lib/backupstore/BackupStoreFileCryptVar.h | 39
-rw-r--r--  lib/backupstore/BackupStoreFileDiff.cpp | 1047
-rw-r--r--  lib/backupstore/BackupStoreFileEncodeStream.cpp | 717
-rw-r--r--  lib/backupstore/BackupStoreFileEncodeStream.h | 137
-rw-r--r--  lib/backupstore/BackupStoreFileRevDiff.cpp | 258
-rw-r--r--  lib/backupstore/BackupStoreFileWire.h | 74
-rw-r--r--  lib/backupstore/BackupStoreFilename.cpp | 281
-rw-r--r--  lib/backupstore/BackupStoreFilename.h | 107
-rw-r--r--  lib/backupstore/BackupStoreFilenameClear.cpp | 347
-rw-r--r--  lib/backupstore/BackupStoreFilenameClear.h | 61
-rw-r--r--  lib/backupstore/BackupStoreInfo.cpp | 385
-rw-r--r--  lib/backupstore/BackupStoreInfo.h | 125
-rw-r--r--  lib/backupstore/BackupStoreObjectMagic.h | 31
-rw-r--r--  lib/backupstore/BackupStoreRefCountDatabase.cpp | 38
-rw-r--r--  lib/backupstore/HousekeepStoreAccount.cpp | 1110
-rw-r--r--  lib/backupstore/HousekeepStoreAccount.h | 113
-rw-r--r--  lib/backupstore/Makefile.extra | 15
-rw-r--r--  lib/backupstore/RunStatusProvider.h | 29
-rw-r--r--  lib/backupstore/backupprotocol.txt | 235
45 files changed, 14128 insertions, 0 deletions
diff --git a/lib/backupstore/BackupClientFileAttributes.cpp b/lib/backupstore/BackupClientFileAttributes.cpp
new file mode 100644
index 00000000..d76432ba
--- /dev/null
+++ b/lib/backupstore/BackupClientFileAttributes.cpp
@@ -0,0 +1,1223 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientFileAttributes.cpp
+// Purpose: Storage of file attributes
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#ifdef HAVE_UNISTD_H
+ #include <unistd.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <limits.h>
+
+#include <algorithm>
+#include <cstring>
+#include <new>
+#include <vector>
+
+#ifdef HAVE_SYS_XATTR_H
+#include <cerrno>
+#include <sys/xattr.h>
+#endif
+
+#include <cstring>
+
+#include "BackupClientFileAttributes.h"
+#include "CommonException.h"
+#include "FileModificationTime.h"
+#include "BoxTimeToUnix.h"
+#include "BackupStoreException.h"
+#include "CipherContext.h"
+#include "CipherBlowfish.h"
+#include "MD5Digest.h"
+
+#include "MemLeakFindOn.h"
+
+// set packing to one byte
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "BeginStructPackForWire.h"
+#else
+BEGIN_STRUCTURE_PACKING_FOR_WIRE
+#endif
+
+#define ATTRIBUTETYPE_GENERIC_UNIX 1
+
+#define ATTRIBUTE_ENCODING_BLOWFISH 2
+
+typedef struct
+{
+ int32_t AttributeType;
+ u_int32_t UID;
+ u_int32_t GID;
+ u_int64_t ModificationTime;
+ u_int64_t AttrModificationTime;
+ u_int32_t UserDefinedFlags;
+ u_int32_t FileGenerationNumber;
+ u_int16_t Mode;
+ // Symbolic link filename may follow
+ // Extended attribute (xattr) information may follow, format is:
+ // u_int32_t Size of extended attribute block (excluding this word)
+ // For each of NumberOfAttributes (sorted by AttributeName):
+ // u_int16_t AttributeNameLength
+ // char AttributeName[AttributeNameLength]
+ // u_int32_t AttributeValueLength
+ // unsigned char AttributeValue[AttributeValueLength]
+ // AttributeName is 0 terminated, AttributeValue is not (and may be binary data)
+} attr_StreamFormat;
+
+// This has wire packing so it's compatible across platforms
+// Use wider than necessary sizes, just to be careful.
+typedef struct
+{
+ int32_t uid, gid, mode;
+ #ifdef WIN32
+ int64_t fileCreationTime;
+ #endif
+} attributeHashData;
+
+// Use default packing
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "EndStructPackForWire.h"
+#else
+END_STRUCTURE_PACKING_FOR_WIRE
+#endif
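
[The comments inside attr_StreamFormat above describe the variable-length data that can follow the fixed header: an optional NUL-terminated symlink target, then an optional xattr block. A minimal sketch of walking a decrypted block of that shape, relying on the attr_StreamFormat definition above plus <string>, <sys/stat.h> and the byte-order helpers; the helper and struct names are illustrative only and are not part of this patch:

struct ParsedAttributes		// hypothetical helper type, for illustration only
{
	std::string symlinkTarget;	// empty unless the mode says S_IFLNK
	size_t xattrOffset;		// offset of the xattr block size word, 0 if absent
};

static ParsedAttributes ParseClearAttributes(const unsigned char *block, size_t size)
{
	ParsedAttributes out;
	out.xattrOffset = 0;
	const attr_StreamFormat *pattr =
		reinterpret_cast<const attr_StreamFormat *>(block);
	size_t offset = sizeof(attr_StreamFormat);

	// A NUL-terminated symlink target immediately follows the header, if any
	if((ntohs(pattr->Mode) & S_IFMT) == S_IFLNK && offset < size)
	{
		out.symlinkTarget = reinterpret_cast<const char *>(block + offset);
		offset += out.symlinkTarget.size() + 1;
	}

	// The xattr block (u_int32_t size word, then the sorted entries) comes next
	if(offset + sizeof(u_int32_t) <= size)
	{
		out.xattrOffset = offset;
	}
	return out;
}
]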
+
+
+#define MAX_ATTRIBUTE_HASH_SECRET_LENGTH 256
+
+// Hide private static variables from the rest of the world
+// -- don't put them as static class variables to avoid openssl/evp.h being
+// included all over the project.
+namespace
+{
+ CipherContext sBlowfishEncrypt;
+ CipherContext sBlowfishDecrypt;
+ uint8_t sAttributeHashSecret[MAX_ATTRIBUTE_HASH_SECRET_LENGTH];
+ int sAttributeHashSecretLength = 0;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::BackupClientFileAttributes()
+// Purpose: Default constructor
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+BackupClientFileAttributes::BackupClientFileAttributes()
+ : mpClearAttributes(0)
+{
+ ASSERT(sizeof(u_int64_t) == sizeof(box_time_t));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::BackupClientFileAttributes()
+//		Purpose: Artificial constructor
+// Created: 2011/12/06
+//
+// --------------------------------------------------------------------------
+BackupClientFileAttributes::BackupClientFileAttributes(const EMU_STRUCT_STAT &st)
+: mpClearAttributes(0)
+{
+ ASSERT(sizeof(u_int64_t) == sizeof(box_time_t));
+ StreamableMemBlock *pnewAttr = new StreamableMemBlock;
+ FillAttributes(*pnewAttr, (const char *)NULL, st, true);
+
+ // Attributes ready. Encrypt into this block
+ EncryptAttr(*pnewAttr);
+
+ // Store the new attributes
+ RemoveClear();
+ mpClearAttributes = pnewAttr;
+ pnewAttr = 0;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::BackupClientFileAttributes(const BackupClientFileAttributes &)
+// Purpose: Copy constructor
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+BackupClientFileAttributes::BackupClientFileAttributes(const BackupClientFileAttributes &rToCopy)
+ : StreamableMemBlock(rToCopy), // base class does the hard work
+ mpClearAttributes(0)
+{
+}
+BackupClientFileAttributes::BackupClientFileAttributes(const StreamableMemBlock &rToCopy)
+ : StreamableMemBlock(rToCopy), // base class does the hard work
+ mpClearAttributes(0)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::~BackupClientFileAttributes()
+// Purpose: Destructor
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+BackupClientFileAttributes::~BackupClientFileAttributes()
+{
+ if(mpClearAttributes)
+ {
+ delete mpClearAttributes;
+ mpClearAttributes = 0;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes &operator=(const BackupClientFileAttributes &)
+// Purpose: Assignment operator
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+BackupClientFileAttributes &BackupClientFileAttributes::operator=(const BackupClientFileAttributes &rAttr)
+{
+ StreamableMemBlock::Set(rAttr);
+ RemoveClear(); // make sure no decrypted version held
+ return *this;
+}
+// Assume users play nice
+BackupClientFileAttributes &BackupClientFileAttributes::operator=(const StreamableMemBlock &rAttr)
+{
+ StreamableMemBlock::Set(rAttr);
+ RemoveClear(); // make sure no decrypted version held
+ return *this;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::operator==(const BackupClientFileAttributes &)
+// Purpose: Comparison operator
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+bool BackupClientFileAttributes::operator==(const BackupClientFileAttributes &rAttr) const
+{
+ EnsureClearAvailable();
+ rAttr.EnsureClearAvailable();
+
+ return mpClearAttributes->operator==(*rAttr.mpClearAttributes);
+}
+// Too dangerous to allow -- put the two names the wrong way round, and it compares encrypted data.
+/*bool BackupClientFileAttributes::operator==(const StreamableMemBlock &rAttr) const
+{
+ StreamableMemBlock *pDecoded = 0;
+
+ try
+ {
+ EnsureClearAvailable();
+ StreamableMemBlock *pDecoded = MakeClear(rAttr);
+
+ // Compare using clear version
+ bool compared = mpClearAttributes->operator==(rAttr);
+
+ // Delete temporary
+ delete pDecoded;
+
+ return compared;
+ }
+ catch(...)
+ {
+ delete pDecoded;
+ throw;
+ }
+}*/
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::Compare(const BackupClientFileAttributes &, bool)
+// Purpose: Compare, optionally ignoring the attribute
+// modification time and/or modification time, and some
+//			data which is irrelevant in practice (e.g. file
+// generation number)
+// Created: 10/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupClientFileAttributes::Compare(const BackupClientFileAttributes &rAttr,
+ bool IgnoreAttrModTime, bool IgnoreModTime) const
+{
+ EnsureClearAvailable();
+ rAttr.EnsureClearAvailable();
+
+ // Check sizes are the same, as a first check
+ if(mpClearAttributes->GetSize() != rAttr.mpClearAttributes->GetSize())
+ {
+ BOX_TRACE("Attribute Compare: Attributes objects are "
+ "different sizes, cannot compare them: local " <<
+ mpClearAttributes->GetSize() << " bytes, remote " <<
+ rAttr.mpClearAttributes->GetSize() << " bytes");
+ return false;
+ }
+
+ // Then check the elements of the two things
+ // Bytes are checked in network order, but this doesn't matter as we're only checking for equality.
+ attr_StreamFormat *a1 = (attr_StreamFormat*)mpClearAttributes->GetBuffer();
+ attr_StreamFormat *a2 = (attr_StreamFormat*)rAttr.mpClearAttributes->GetBuffer();
+
+ #define COMPARE(attribute, message) \
+ if (a1->attribute != a2->attribute) \
+ { \
+ BOX_TRACE("Attribute Compare: " << message << " differ: " \
+ "local " << ntoh(a1->attribute) << ", " \
+ "remote " << ntoh(a2->attribute)); \
+ return false; \
+ }
+ COMPARE(AttributeType, "Attribute types");
+ COMPARE(UID, "UIDs");
+ COMPARE(GID, "GIDs");
+ COMPARE(UserDefinedFlags, "User-defined flags");
+ COMPARE(Mode, "Modes");
+
+ if(!IgnoreModTime)
+ {
+ uint64_t t1 = box_ntoh64(a1->ModificationTime);
+ uint64_t t2 = box_ntoh64(a2->ModificationTime);
+ time_t s1 = BoxTimeToSeconds(t1);
+ time_t s2 = BoxTimeToSeconds(t2);
+ if(s1 != s2)
+ {
+ BOX_TRACE("Attribute Compare: File modification "
+ "times differ: local " <<
+ FormatTime(t1, true) << " (" << s1 << "), "
+ "remote " <<
+ FormatTime(t2, true) << " (" << s2 << ")");
+ return false;
+ }
+ }
+
+ if(!IgnoreAttrModTime)
+ {
+ uint64_t t1 = box_ntoh64(a1->AttrModificationTime);
+ uint64_t t2 = box_ntoh64(a2->AttrModificationTime);
+ time_t s1 = BoxTimeToSeconds(t1);
+ time_t s2 = BoxTimeToSeconds(t2);
+ if(s1 != s2)
+ {
+ BOX_TRACE("Attribute Compare: Attribute modification "
+ "times differ: local " <<
+ FormatTime(t1, true) << " (" << s1 << "), "
+ "remote " <<
+ FormatTime(t2, true) << " (" << s2 << ")");
+ return false;
+ }
+ }
+
+ // Check symlink string?
+ unsigned int size = mpClearAttributes->GetSize();
+ if(size > sizeof(attr_StreamFormat))
+ {
+		// Compare the trailing data (symlink target and/or xattrs) byte for byte
+ int datalen = size - sizeof(attr_StreamFormat);
+
+ if(::memcmp(a1 + 1, a2 + 1, datalen) != 0)
+ {
+ std::string s1((char *)(a1 + 1), datalen);
+ std::string s2((char *)(a2 + 1), datalen);
+ BOX_TRACE("Attribute Compare: Symbolic link target "
+ "or extended attributes differ: "
+ "local " << PrintEscapedBinaryData(s1) << ", "
+ "remote " << PrintEscapedBinaryData(s2));
+ return false;
+ }
+ }
+
+	// Passes all tests, must be OK
+ return true;
+}
+
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::ReadAttributes(
+// const char *Filename, bool ZeroModificationTimes,
+// box_time_t *pModTime, box_time_t *pAttrModTime,
+// int64_t *pFileSize, InodeRefType *pInodeNumber,
+// bool *pHasMultipleLinks)
+// Purpose: Read the attributes of the file, and store them
+// ready for streaming. Optionally retrieve the
+// modification time and attribute modification time.
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::ReadAttributes(const std::string& Filename,
+ bool ZeroModificationTimes, box_time_t *pModTime,
+ box_time_t *pAttrModTime, int64_t *pFileSize,
+ InodeRefType *pInodeNumber, bool *pHasMultipleLinks)
+{
+ StreamableMemBlock *pnewAttr = 0;
+ try
+ {
+ EMU_STRUCT_STAT st;
+ if(EMU_LSTAT(Filename.c_str(), &st) != 0)
+ {
+ THROW_SYS_FILE_ERROR("Failed to stat file",
+ Filename, CommonException, OSFileError)
+ }
+
+ // Modification times etc
+ if(pModTime) {*pModTime = FileModificationTime(st);}
+ if(pAttrModTime) {*pAttrModTime = FileAttrModificationTime(st);}
+ if(pFileSize) {*pFileSize = st.st_size;}
+ if(pInodeNumber) {*pInodeNumber = st.st_ino;}
+ if(pHasMultipleLinks) {*pHasMultipleLinks = (st.st_nlink > 1);}
+
+ pnewAttr = new StreamableMemBlock;
+
+ FillAttributes(*pnewAttr, Filename, st, ZeroModificationTimes);
+
+#ifndef WIN32
+ // Is it a link?
+ if((st.st_mode & S_IFMT) == S_IFLNK)
+ {
+ FillAttributesLink(*pnewAttr, Filename, st);
+ }
+#endif
+
+ FillExtendedAttr(*pnewAttr, Filename);
+
+#ifdef WIN32
+		// This catches problems with invalid timestamps stored in the
+		// attributes. The root cause still needs investigating, but
+		// guard against it here as well.
+
+ attr_StreamFormat *pattr =
+ (attr_StreamFormat*)pnewAttr->GetBuffer();
+ ASSERT(pattr != 0);
+
+ // __time64_t winTime = BoxTimeToSeconds(
+ // pnewAttr->ModificationTime);
+
+ u_int64_t modTime = box_ntoh64(pattr->ModificationTime);
+ box_time_t modSecs = BoxTimeToSeconds(modTime);
+ __time64_t winTime = modSecs;
+
+ // _MAX__TIME64_T doesn't seem to be defined, but the code below
+ // will throw an assertion failure if we exceed it :-)
+ // Microsoft says dates up to the year 3000 are valid, which
+ // is a bit more than 15 * 2^32. Even that doesn't seem
+ // to be true (still aborts), but it can at least hold 2^32.
+ if (winTime >= 0x100000000LL || _gmtime64(&winTime) == 0)
+ {
+ BOX_ERROR("Invalid Modification Time caught for "
+ "file: '" << Filename << "'");
+ pattr->ModificationTime = 0;
+ }
+
+ modTime = box_ntoh64(pattr->AttrModificationTime);
+ modSecs = BoxTimeToSeconds(modTime);
+ winTime = modSecs;
+
+ if (winTime > 0x100000000LL || _gmtime64(&winTime) == 0)
+ {
+ BOX_ERROR("Invalid Attribute Modification Time "
+ "caught for file: '" << Filename << "'");
+ pattr->AttrModificationTime = 0;
+ }
+#endif
+
+ // Attributes ready. Encrypt into this block
+ EncryptAttr(*pnewAttr);
+
+ // Store the new attributes
+ RemoveClear();
+ mpClearAttributes = pnewAttr;
+ pnewAttr = 0;
+ }
+ catch(...)
+ {
+ // clean up
+ delete pnewAttr;
+ pnewAttr = 0;
+ throw;
+ }
+}
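
[A minimal usage sketch of the call above, assuming SetBlowfishKey() has already been given the attribute encryption key; the path and variable names are illustrative only:

BackupClientFileAttributes attr;
box_time_t modTime = 0, attrModTime = 0;
int64_t fileSize = 0;
InodeRefType inodeNum = 0;
bool hasMultipleLinks = false;

// One lstat() fills the encrypted attribute block and the out-parameters,
// so the caller does not need to stat the file a second time.
attr.ReadAttributes("/some/backed/up/file", false /* keep modification times */,
	&modTime, &attrModTime, &fileSize, &inodeNum, &hasMultipleLinks);
]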
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::FillAttributes()
+// Purpose: Private function, handles standard attributes for all objects
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::FillAttributes(
+ StreamableMemBlock &outputBlock, const std::string& rFilename,
+ const EMU_STRUCT_STAT &st, bool ZeroModificationTimes
+)
+{
+ outputBlock.ResizeBlock(sizeof(attr_StreamFormat));
+ attr_StreamFormat *pattr = (attr_StreamFormat*)outputBlock.GetBuffer();
+ ASSERT(pattr != 0);
+
+ // Fill in the entries
+ pattr->AttributeType = htonl(ATTRIBUTETYPE_GENERIC_UNIX);
+ pattr->UID = htonl(st.st_uid);
+ pattr->GID = htonl(st.st_gid);
+ if(ZeroModificationTimes)
+ {
+ pattr->ModificationTime = 0;
+ pattr->AttrModificationTime = 0;
+ }
+ else
+ {
+ pattr->ModificationTime = box_hton64(FileModificationTime(st));
+ pattr->AttrModificationTime = box_hton64(FileAttrModificationTime(st));
+ }
+ pattr->Mode = htons(st.st_mode);
+
+#ifndef HAVE_STRUCT_STAT_ST_FLAGS
+ pattr->UserDefinedFlags = 0;
+ pattr->FileGenerationNumber = 0;
+#else
+ pattr->UserDefinedFlags = htonl(st.st_flags);
+ pattr->FileGenerationNumber = htonl(st.st_gen);
+#endif
+}
+#ifndef WIN32
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::FillAttributesLink(
+// StreamableMemBlock &outputBlock,
+// const char *Filename, struct stat &st)
+// Purpose: Private function, handles the case where a symbolic link is needed
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::FillAttributesLink(
+ StreamableMemBlock &outputBlock, const std::string& Filename,
+ struct stat &st)
+{
+ // Make sure we're only called for symbolic links
+ ASSERT((st.st_mode & S_IFMT) == S_IFLNK);
+
+ // Get the filename the link is linked to
+ char linkedTo[PATH_MAX+4];
+ int linkedToSize = ::readlink(Filename.c_str(), linkedTo, PATH_MAX);
+ if(linkedToSize == -1)
+ {
+ BOX_LOG_SYS_ERROR("Failed to readlink '" << Filename << "'");
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+
+ int oldSize = outputBlock.GetSize();
+ outputBlock.ResizeBlock(oldSize+linkedToSize+1);
+ char* buffer = static_cast<char*>(outputBlock.GetBuffer());
+
+ // Add the path name for the symbolic link, and add 0 termination
+ std::memcpy(buffer+oldSize, linkedTo, linkedToSize);
+ buffer[oldSize+linkedToSize] = '\0';
+}
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::FillExtendedAttr(const char *, unsigned char**)
+// Purpose: Private function, read the extended attributes of the file into the block
+// Created: 2005/06/12
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::FillExtendedAttr(StreamableMemBlock &outputBlock,
+ const std::string& Filename)
+{
+#ifdef HAVE_SYS_XATTR_H
+ int listBufferSize = 10000;
+ char* list = new char[listBufferSize];
+
+ try
+ {
+ // This returns an unordered list of attribute names, each 0 terminated,
+ // concatenated together
+ int listSize = ::llistxattr(Filename.c_str(), list, listBufferSize);
+
+ if(listSize>listBufferSize)
+ {
+ delete[] list, list = NULL;
+ list = new char[listSize];
+ listSize = ::llistxattr(Filename.c_str(), list, listSize);
+ }
+
+ if(listSize>0)
+ {
+ // Extract list of attribute names so we can sort them
+ std::vector<std::string> attrKeys;
+ for(int i = 0; i<listSize; ++i)
+ {
+ std::string attrKey(list+i);
+ i += attrKey.size();
+ attrKeys.push_back(attrKey);
+ }
+ sort(attrKeys.begin(), attrKeys.end());
+
+ // Make initial space in block
+ int xattrSize = outputBlock.GetSize();
+ int xattrBufferSize = (xattrSize+listSize)>500 ? (xattrSize+listSize)*2 : 1000;
+ outputBlock.ResizeBlock(xattrBufferSize);
+ unsigned char* buffer = static_cast<unsigned char*>(outputBlock.GetBuffer());
+
+ // Leave space for attr block size later
+ int xattrBlockSizeOffset = xattrSize;
+ xattrSize += sizeof(u_int32_t);
+
+ // Loop for each attribute
+ for(std::vector<std::string>::const_iterator attrKeyI = attrKeys.begin(); attrKeyI!=attrKeys.end(); ++attrKeyI)
+ {
+ std::string attrKey(*attrKeyI);
+
+ if(xattrSize+sizeof(u_int16_t)+attrKey.size()+1+sizeof(u_int32_t)>static_cast<unsigned int>(xattrBufferSize))
+ {
+ xattrBufferSize = (xattrBufferSize+sizeof(u_int16_t)+attrKey.size()+1+sizeof(u_int32_t))*2;
+ outputBlock.ResizeBlock(xattrBufferSize);
+ buffer = static_cast<unsigned char*>(outputBlock.GetBuffer());
+ }
+
+			// Store length and text for attribute name
+ u_int16_t keyLength = htons(attrKey.size()+1);
+ std::memcpy(buffer+xattrSize, &keyLength, sizeof(u_int16_t));
+ xattrSize += sizeof(u_int16_t);
+ std::memcpy(buffer+xattrSize, attrKey.c_str(), attrKey.size()+1);
+ xattrSize += attrKey.size()+1;
+
+ // Leave space for value size
+ int valueSizeOffset = xattrSize;
+ xattrSize += sizeof(u_int32_t);
+
+ // Find size of attribute (must call with buffer and length 0 on some platforms,
+ // as -1 is returned if the data doesn't fit.)
+ int valueSize = ::lgetxattr(Filename.c_str(), attrKey.c_str(), 0, 0);
+ if(valueSize<0)
+ {
+ BOX_LOG_SYS_ERROR("Failed to get "
+ "extended attribute size of "
+ "'" << Filename << "': " <<
+ attrKey);
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+
+ // Resize block, if needed
+ if(xattrSize+valueSize>xattrBufferSize)
+ {
+ xattrBufferSize = (xattrBufferSize+valueSize)*2;
+ outputBlock.ResizeBlock(xattrBufferSize);
+ buffer = static_cast<unsigned char*>(outputBlock.GetBuffer());
+ }
+
+ // This gets the attribute value (may be text or binary), no termination
+ valueSize = ::lgetxattr(Filename.c_str(),
+ attrKey.c_str(), buffer+xattrSize,
+ xattrBufferSize-xattrSize);
+ if(valueSize<0)
+ {
+ BOX_LOG_SYS_ERROR("Failed to get "
+ "extended attribute of "
+ "'" << Filename << "': " <<
+ attrKey);
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+ xattrSize += valueSize;
+
+ // Fill in value size
+ u_int32_t valueLength = htonl(valueSize);
+ std::memcpy(buffer+valueSizeOffset, &valueLength, sizeof(u_int32_t));
+ }
+
+ // Fill in attribute block size
+ u_int32_t xattrBlockLength = htonl(xattrSize-xattrBlockSizeOffset-sizeof(u_int32_t));
+ std::memcpy(buffer+xattrBlockSizeOffset, &xattrBlockLength, sizeof(u_int32_t));
+
+ outputBlock.ResizeBlock(xattrSize);
+ }
+ else if(listSize<0)
+ {
+ if(errno == EOPNOTSUPP || errno == EACCES)
+ {
+ // fail silently
+ }
+ else if(errno == ERANGE)
+ {
+ BOX_ERROR("Failed to list extended "
+ "attributes of '" << Filename << "': "
+ "buffer too small, not backed up");
+ }
+ else
+ {
+ BOX_LOG_SYS_ERROR("Failed to list extended "
+ "attributes of '" << Filename << "', "
+ "not backed up");
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+ }
+ }
+ catch(...)
+ {
+ delete[] list;
+ throw;
+ }
+ delete[] list;
+#endif
+}
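
[The size-query trick mentioned in the comments above (calling the xattr functions with a zero-length buffer to learn the required size) is the standard Linux pattern. A standalone sketch of that pattern, not taken from this patch; the helper name is illustrative:

#include <sys/xattr.h>
#include <string>
#include <vector>

static std::vector<char> GetXattrValue(const std::string &rPath, const std::string &rName)
{
	// First call: zero-length buffer, returns only the size of the value
	ssize_t size = ::lgetxattr(rPath.c_str(), rName.c_str(), NULL, 0);
	if(size < 0)
	{
		return std::vector<char>();	// error or attribute missing
	}

	// Second call: copy the value into a buffer of that size
	std::vector<char> value(size);
	ssize_t copied = ::lgetxattr(rPath.c_str(), rName.c_str(),
		value.empty() ? NULL : &value[0], value.size());
	if(copied < 0)
	{
		return std::vector<char>();	// value changed or vanished between calls
	}
	value.resize(copied);
	return value;
}
]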
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::GetModificationTimes()
+// Purpose: Returns the modification time embedded in the
+// attributes.
+// Created: 2010/02/24
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::GetModificationTimes(
+ box_time_t *pModificationTime,
+ box_time_t *pAttrModificationTime) const
+{
+ // Got something loaded
+ if(GetSize() <= 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, AttributesNotLoaded);
+ }
+
+ // Make sure there are clear attributes to use
+ EnsureClearAvailable();
+ ASSERT(mpClearAttributes != 0);
+
+ // Check if the decrypted attributes are small enough, and the type of attributes stored
+ if(mpClearAttributes->GetSize() < (int)sizeof(int32_t))
+ {
+ THROW_EXCEPTION(BackupStoreException, AttributesNotUnderstood);
+ }
+ int32_t *type = (int32_t*)mpClearAttributes->GetBuffer();
+ ASSERT(type != 0);
+ if(ntohl(*type) != ATTRIBUTETYPE_GENERIC_UNIX)
+ {
+ // Don't know what to do with these
+ THROW_EXCEPTION(BackupStoreException, AttributesNotUnderstood);
+ }
+
+ // Check there is enough space for an attributes block
+ if(mpClearAttributes->GetSize() < (int)sizeof(attr_StreamFormat))
+ {
+ // Too small
+ THROW_EXCEPTION(BackupStoreException, AttributesNotLoaded);
+ }
+
+ // Get pointer to structure
+ attr_StreamFormat *pattr = (attr_StreamFormat*)mpClearAttributes->GetBuffer();
+
+ if(pModificationTime)
+ {
+ *pModificationTime = box_ntoh64(pattr->ModificationTime);
+ }
+
+ if(pAttrModificationTime)
+ {
+ *pAttrModificationTime = box_ntoh64(pattr->AttrModificationTime);
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::WriteAttributes(const char *)
+// Purpose: Apply the stored attributes to the file
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::WriteAttributes(const std::string& Filename,
+ bool MakeUserWritable) const
+{
+ // Got something loaded
+ if(GetSize() <= 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, AttributesNotLoaded);
+ }
+
+ // Make sure there are clear attributes to use
+ EnsureClearAvailable();
+ ASSERT(mpClearAttributes != 0);
+
+ // Check if the decrypted attributes are small enough, and the type of attributes stored
+ if(mpClearAttributes->GetSize() < (int)sizeof(int32_t))
+ {
+ THROW_EXCEPTION(BackupStoreException, AttributesNotUnderstood);
+ }
+ int32_t *type = (int32_t*)mpClearAttributes->GetBuffer();
+ ASSERT(type != 0);
+ if(ntohl(*type) != ATTRIBUTETYPE_GENERIC_UNIX)
+ {
+ // Don't know what to do with these
+ THROW_EXCEPTION(BackupStoreException, AttributesNotUnderstood);
+ }
+
+ // Check there is enough space for an attributes block
+ if(mpClearAttributes->GetSize() < (int)sizeof(attr_StreamFormat))
+ {
+ // Too small
+ THROW_EXCEPTION(BackupStoreException, AttributesNotLoaded);
+ }
+
+ // Get pointer to structure
+ attr_StreamFormat *pattr = (attr_StreamFormat*)mpClearAttributes->GetBuffer();
+ int xattrOffset = sizeof(attr_StreamFormat);
+
+ // is it a symlink?
+ int16_t mode = ntohs(pattr->Mode);
+ if((mode & S_IFMT) == S_IFLNK)
+ {
+ // Check things are sensible
+ if(mpClearAttributes->GetSize() < (int)sizeof(attr_StreamFormat) + 1)
+ {
+ // Too small
+ THROW_EXCEPTION(BackupStoreException, AttributesNotLoaded);
+ }
+
+#ifdef WIN32
+ BOX_WARNING("Cannot create symbolic links on Windows: '" <<
+ Filename << "'");
+#else
+ // Make a symlink, first deleting anything in the way
+ ::unlink(Filename.c_str());
+ if(::symlink((char*)(pattr + 1), Filename.c_str()) != 0)
+ {
+ BOX_LOG_SYS_ERROR("Failed to symlink '" << Filename <<
+ "' to '" << (char*)(pattr + 1) << "'");
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+#endif
+
+ xattrOffset += std::strlen(reinterpret_cast<char*>(pattr+1))+1;
+ }
+
+ // If working as root, set user IDs
+ if(::geteuid() == 0)
+ {
+ #ifndef HAVE_LCHOWN
+ // only if not a link, can't set their owner on this platform
+ if((mode & S_IFMT) != S_IFLNK)
+ {
+ // Not a link, use normal chown
+ if(::chown(Filename.c_str(), ntohl(pattr->UID), ntohl(pattr->GID)) != 0)
+ {
+ BOX_LOG_SYS_ERROR("Failed to change "
+ "owner of file "
+ "'" << Filename << "'");
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+ }
+ #else
+ // use the version which sets things on symlinks
+ if(::lchown(Filename.c_str(), ntohl(pattr->UID), ntohl(pattr->GID)) != 0)
+ {
+ BOX_LOG_SYS_ERROR("Failed to change owner of "
+ "symbolic link '" << Filename << "'");
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+ #endif
+ }
+
+ if(static_cast<int>(xattrOffset+sizeof(u_int32_t))<=mpClearAttributes->GetSize())
+ {
+ WriteExtendedAttr(Filename, xattrOffset);
+ }
+
+ // Stop now if symlink, because otherwise it'll just be applied to the target
+ if((mode & S_IFMT) == S_IFLNK)
+ {
+ return;
+ }
+
+ // Set modification time?
+ box_time_t modtime = box_ntoh64(pattr->ModificationTime);
+ if(modtime != 0)
+ {
+ // Work out times as timevals
+ struct timeval times[2];
+
+ #ifdef WIN32
+ BoxTimeToTimeval(box_ntoh64(pattr->ModificationTime),
+ times[1]);
+ BoxTimeToTimeval(box_ntoh64(pattr->AttrModificationTime),
+ times[0]);
+ // Because stat() returns the creation time in the ctime
+ // field under Windows, and this gets saved in the
+ // AttrModificationTime field of the serialised attributes,
+ // we subvert the first parameter of emu_utimes() to allow
+ // it to be reset to the right value on the restored file.
+ #else
+ BoxTimeToTimeval(modtime, times[1]);
+ // Copy access time as well, why not, got to set it to something
+ times[0] = times[1];
+ // Attr modification time will be changed anyway,
+ // nothing that can be done about it
+ #endif
+
+ // Try to apply
+ if(::utimes(Filename.c_str(), times) != 0)
+ {
+ BOX_LOG_SYS_WARNING("Failed to change times of "
+ "file '" << Filename << "' to ctime=" <<
+ BOX_FORMAT_TIMESPEC(times[0]) << ", mtime=" <<
+ BOX_FORMAT_TIMESPEC(times[1]));
+ }
+ }
+
+ if (MakeUserWritable)
+ {
+ mode |= S_IRWXU;
+ }
+
+ // Apply everything else... (allowable mode flags only)
+ // Mode must be done last (think setuid)
+ if(::chmod(Filename.c_str(), mode & (S_IRWXU | S_IRWXG | S_IRWXO |
+ S_ISUID | S_ISGID | S_ISVTX)) != 0)
+ {
+ BOX_LOG_SYS_ERROR("Failed to change permissions of file "
+ "'" << Filename << "'");
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::IsSymLink()
+// Purpose: Do these attributes represent a symbolic link?
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+bool BackupClientFileAttributes::IsSymLink() const
+{
+ EnsureClearAvailable();
+
+ // Got the right kind of thing?
+ if(mpClearAttributes->GetSize() < (int)sizeof(int32_t))
+ {
+ THROW_EXCEPTION(BackupStoreException, AttributesNotLoaded);
+ }
+
+ // Get the type of attributes stored
+ int32_t *type = (int32_t*)mpClearAttributes->GetBuffer();
+ ASSERT(type != 0);
+ if(ntohl(*type) == ATTRIBUTETYPE_GENERIC_UNIX && mpClearAttributes->GetSize() > (int)sizeof(attr_StreamFormat))
+ {
+ // Check link
+ attr_StreamFormat *pattr = (attr_StreamFormat*)mpClearAttributes->GetBuffer();
+ return ((ntohs(pattr->Mode)) & S_IFMT) == S_IFLNK;
+ }
+
+ return false;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::RemoveClear()
+// Purpose: Private. Deletes any clear version of the attributes that may be held
+// Created: 3/12/03
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::RemoveClear() const
+{
+ if(mpClearAttributes)
+ {
+ delete mpClearAttributes;
+ }
+ mpClearAttributes = 0;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::EnsureClearAvailable()
+// Purpose: Private. Makes sure the clear version is available
+// Created: 3/12/03
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::EnsureClearAvailable() const
+{
+ if(mpClearAttributes == 0)
+ {
+ mpClearAttributes = MakeClear(*this);
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::WriteExtendedAttr(const char *Filename, int xattrOffset)
+// Purpose: Private function, apply the stored extended attributes to the file
+// Created: 2005/06/13
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::WriteExtendedAttr(const std::string& Filename, int xattrOffset) const
+{
+#ifdef HAVE_SYS_XATTR_H
+ const char* buffer = static_cast<char*>(mpClearAttributes->GetBuffer());
+
+ u_int32_t xattrBlockLength = 0;
+ std::memcpy(&xattrBlockLength, buffer+xattrOffset, sizeof(u_int32_t));
+ int xattrBlockSize = ntohl(xattrBlockLength);
+ xattrOffset += sizeof(u_int32_t);
+
+ int xattrEnd = xattrOffset+xattrBlockSize;
+ if(xattrEnd>mpClearAttributes->GetSize())
+ {
+ // Too small
+ THROW_EXCEPTION(BackupStoreException, AttributesNotLoaded);
+ }
+
+ while(xattrOffset<xattrEnd)
+ {
+ u_int16_t keyLength = 0;
+ std::memcpy(&keyLength, buffer+xattrOffset, sizeof(u_int16_t));
+ int keySize = ntohs(keyLength);
+ xattrOffset += sizeof(u_int16_t);
+
+ const char* key = buffer+xattrOffset;
+ xattrOffset += keySize;
+
+ u_int32_t valueLength = 0;
+ std::memcpy(&valueLength, buffer+xattrOffset, sizeof(u_int32_t));
+ int valueSize = ntohl(valueLength);
+ xattrOffset += sizeof(u_int32_t);
+
+ // FIXME: Warn on EOPNOTSUPP
+ if(::lsetxattr(Filename.c_str(), key, buffer+xattrOffset,
+ valueSize, 0)!=0 && errno!=EOPNOTSUPP)
+ {
+ BOX_LOG_SYS_ERROR("Failed to set extended attributes "
+ "on file '" << Filename << "'");
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+
+ xattrOffset += valueSize;
+ }
+
+ ASSERT(xattrOffset==xattrEnd);
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::MakeClear(const StreamableMemBlock &)
+// Purpose: Static. Decrypts stored attributes.
+// Created: 3/12/03
+//
+// --------------------------------------------------------------------------
+StreamableMemBlock *BackupClientFileAttributes::MakeClear(const StreamableMemBlock &rEncrypted)
+{
+ // New block
+ StreamableMemBlock *pdecrypted = 0;
+
+ try
+ {
+ // Check the block is big enough for IV and header
+ int ivSize = sBlowfishEncrypt.GetIVLength();
+ if(rEncrypted.GetSize() <= (ivSize + 1))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadEncryptedAttributes);
+ }
+
+ // How much space is needed for the output?
+ int maxDecryptedSize = sBlowfishDecrypt.MaxOutSizeForInBufferSize(rEncrypted.GetSize() - ivSize);
+
+ // Allocate it
+ pdecrypted = new StreamableMemBlock(maxDecryptedSize);
+
+ // ptr to block
+ uint8_t *encBlock = (uint8_t*)rEncrypted.GetBuffer();
+
+ // Check that the header has right type
+ if(encBlock[0] != ATTRIBUTE_ENCODING_BLOWFISH)
+ {
+ THROW_EXCEPTION(BackupStoreException, EncryptedAttributesHaveUnknownEncoding);
+ }
+
+ // Set IV
+ sBlowfishDecrypt.SetIV(encBlock + 1);
+
+ // Decrypt
+ int decryptedSize = sBlowfishDecrypt.TransformBlock(pdecrypted->GetBuffer(), maxDecryptedSize, encBlock + 1 + ivSize, rEncrypted.GetSize() - (ivSize + 1));
+
+ // Resize block to fit
+ pdecrypted->ResizeBlock(decryptedSize);
+ }
+ catch(...)
+ {
+ delete pdecrypted;
+ pdecrypted = 0;
+ }
+
+ return pdecrypted;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::SetBlowfishKey(const void *, int)
+// Purpose: Static. Sets the key to use for encryption and decryption.
+// Created: 3/12/03
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::SetBlowfishKey(const void *pKey, int KeyLength)
+{
+ // IVs set later
+ sBlowfishEncrypt.Reset();
+ sBlowfishEncrypt.Init(CipherContext::Encrypt, CipherBlowfish(CipherDescription::Mode_CBC, pKey, KeyLength));
+ sBlowfishDecrypt.Reset();
+ sBlowfishDecrypt.Init(CipherContext::Decrypt, CipherBlowfish(CipherDescription::Mode_CBC, pKey, KeyLength));
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::EncryptAttr(const StreamableMemBlock &)
+// Purpose: Private. Encrypt the given attributes into this block.
+// Created: 3/12/03
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::EncryptAttr(const StreamableMemBlock &rToEncrypt)
+{
+ // Free any existing block
+ FreeBlock();
+
+ // Work out the maximum amount of space we need
+ int maxEncryptedSize = sBlowfishEncrypt.MaxOutSizeForInBufferSize(rToEncrypt.GetSize());
+ // And the size of the IV
+ int ivSize = sBlowfishEncrypt.GetIVLength();
+
+ // Allocate this space
+ AllocateBlock(maxEncryptedSize + ivSize + 1);
+
+ // Store the encoding byte
+ uint8_t *block = (uint8_t*)GetBuffer();
+ block[0] = ATTRIBUTE_ENCODING_BLOWFISH;
+
+ // Generate and store an IV for this attribute block
+ int ivSize2 = 0;
+ const void *iv = sBlowfishEncrypt.SetRandomIV(ivSize2);
+ ASSERT(ivSize == ivSize2);
+
+ // Copy into the encrypted block
+ ::memcpy(block + 1, iv, ivSize);
+
+ // Do the transform
+	int encryptedSize = sBlowfishEncrypt.TransformBlock(block + 1 + ivSize, maxEncryptedSize, rToEncrypt.GetBuffer(), rToEncrypt.GetSize());
+
+	// Resize this block
+	ResizeBlock(encryptedSize + ivSize + 1);
+}
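
[Taken together, EncryptAttr() and MakeClear() define the encrypted attribute block layout: one encoding byte (ATTRIBUTE_ENCODING_BLOWFISH, defined near the top of this file), the random IV, then the Blowfish-CBC ciphertext of the clear attribute block. A small sketch that splits such a block given a known IV length; the helper and struct names are illustrative only, not part of this patch:

struct EncryptedAttrView	// hypothetical, for illustration only
{
	const uint8_t *pIV;
	const uint8_t *pCipherText;
	size_t cipherTextSize;
};

static bool SplitEncryptedAttributes(const uint8_t *pBlock, size_t Size,
	size_t IVSize, EncryptedAttrView &rViewOut)
{
	// Must hold at least the encoding byte, the IV, and one ciphertext byte
	if(Size <= IVSize + 1 || pBlock[0] != ATTRIBUTE_ENCODING_BLOWFISH)
	{
		return false;
	}
	rViewOut.pIV = pBlock + 1;
	rViewOut.pCipherText = pBlock + 1 + IVSize;
	rViewOut.cipherTextSize = Size - (IVSize + 1);
	return true;
}
]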
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::SetAttributeHashSecret(const void *, int)
+// Purpose: Set the secret for the filename attribute hash
+// Created: 25/4/04
+//
+// --------------------------------------------------------------------------
+void BackupClientFileAttributes::SetAttributeHashSecret(const void *pSecret, int SecretLength)
+{
+ if(SecretLength > (int)sizeof(sAttributeHashSecret))
+ {
+ SecretLength = sizeof(sAttributeHashSecret);
+ }
+ if(SecretLength < 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ // Copy
+ ::memcpy(sAttributeHashSecret, pSecret, SecretLength);
+ sAttributeHashSecretLength = SecretLength;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientFileAttributes::GenerateAttributeHash(
+// struct stat &, const std::string &,
+// const std::string &)
+// Purpose: Generate a 64 bit hash from the attributes, used to
+// detect changes. Include filename in the hash, so
+// that it changes from one file to another, so don't
+// reveal identical attributes.
+// Created: 25/4/04
+//
+// --------------------------------------------------------------------------
+uint64_t BackupClientFileAttributes::GenerateAttributeHash(EMU_STRUCT_STAT &st,
+ const std::string &filename, const std::string &leafname)
+{
+ if(sAttributeHashSecretLength == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, AttributeHashSecretNotSet)
+ }
+
+ // Assemble stuff we're interested in
+ attributeHashData hashData;
+ memset(&hashData, 0, sizeof(hashData));
+ // Use network byte order and large sizes to be cross platform
+ hashData.uid = htonl(st.st_uid);
+ hashData.gid = htonl(st.st_gid);
+ hashData.mode = htonl(st.st_mode);
+
+ #ifdef WIN32
+ // On Windows, the "file attribute modification time" is the
+ // file creation time, and we want to back this up, restore
+ // it and compare it.
+ //
+ // On other platforms, it's not very important and can't
+ // reliably be set to anything other than the current time.
+ hashData.fileCreationTime = box_hton64(st.st_ctime);
+ #endif
+
+ StreamableMemBlock xattr;
+ FillExtendedAttr(xattr, filename.c_str());
+
+ // Create a MD5 hash of the data, filename, and secret
+ MD5Digest digest;
+ digest.Add(&hashData, sizeof(hashData));
+ digest.Add(xattr.GetBuffer(), xattr.GetSize());
+ digest.Add(leafname.c_str(), leafname.size());
+ digest.Add(sAttributeHashSecret, sAttributeHashSecretLength);
+ digest.Finish();
+
+ // Return the first 64 bits of the hash
+ uint64_t result;
+ memcpy(&result, digest.DigestAsData(), sizeof(result));
+ return result;
+}
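
[A hedged sketch of how such a hash might be used on the client side to decide whether attributes need re-sending; localPath, leafName and storedHash are hypothetical variables, and the comparison logic is an assumption about the caller rather than part of this patch:

EMU_STRUCT_STAT st;
if(EMU_LSTAT(localPath.c_str(), &st) == 0)
{
	uint64_t currentHash = BackupClientFileAttributes::GenerateAttributeHash(
		st, localPath, leafName);
	if(currentHash != storedHash)
	{
		// uid/gid/mode/xattrs (or the secret) differ from what was recorded,
		// so the attributes would be read and uploaded again
	}
}
]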
diff --git a/lib/backupstore/BackupClientFileAttributes.h b/lib/backupstore/BackupClientFileAttributes.h
new file mode 100644
index 00000000..662529ec
--- /dev/null
+++ b/lib/backupstore/BackupClientFileAttributes.h
@@ -0,0 +1,82 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientFileAttributes.h
+// Purpose: Storage of file attributes
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTFILEATTRIBUTES__H
+#define BACKUPCLIENTFILEATTRIBUTES__H
+
+#include <string>
+
+#include "StreamableMemBlock.h"
+#include "BoxTime.h"
+
+EMU_STRUCT_STAT; // declaration
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupClientFileAttributes
+// Purpose: Storage, streaming and application of file attributes
+// Created: 2003/10/07
+//
+// --------------------------------------------------------------------------
+class BackupClientFileAttributes : public StreamableMemBlock
+{
+public:
+ BackupClientFileAttributes();
+ BackupClientFileAttributes(const EMU_STRUCT_STAT &st);
+ BackupClientFileAttributes(const BackupClientFileAttributes &rToCopy);
+ BackupClientFileAttributes(const StreamableMemBlock &rToCopy);
+ ~BackupClientFileAttributes();
+ BackupClientFileAttributes &operator=(const BackupClientFileAttributes &rAttr);
+ BackupClientFileAttributes &operator=(const StreamableMemBlock &rAttr);
+ bool operator==(const BackupClientFileAttributes &rAttr) const;
+// bool operator==(const StreamableMemBlock &rAttr) const; // too dangerous?
+
+ bool Compare(const BackupClientFileAttributes &rAttr, bool IgnoreAttrModTime = false, bool IgnoreModTime = false) const;
+
+	// Prevent accidental access to base class members
+ void Set();
+
+ void ReadAttributes(const std::string& Filename, bool ZeroModificationTimes = false,
+ box_time_t *pModTime = 0, box_time_t *pAttrModTime = 0, int64_t *pFileSize = 0,
+ InodeRefType *pInodeNumber = 0, bool *pHasMultipleLinks = 0);
+ void WriteAttributes(const std::string& Filename,
+ bool MakeUserWritable = false) const;
+ void GetModificationTimes(box_time_t *pModificationTime,
+ box_time_t *pAttrModificationTime) const;
+
+ bool IsSymLink() const;
+
+ static void SetBlowfishKey(const void *pKey, int KeyLength);
+ static void SetAttributeHashSecret(const void *pSecret, int SecretLength);
+
+ static uint64_t GenerateAttributeHash(EMU_STRUCT_STAT &st,
+ const std::string& Filename, const std::string &leafname);
+ static void FillExtendedAttr(StreamableMemBlock &outputBlock,
+ const std::string& Filename);
+
+private:
+ static void FillAttributes(StreamableMemBlock &outputBlock,
+ const std::string& Filename, const EMU_STRUCT_STAT &st,
+ bool ZeroModificationTimes);
+ static void FillAttributesLink(StreamableMemBlock &outputBlock,
+ const std::string& Filename, struct stat &st);
+ void WriteExtendedAttr(const std::string& Filename, int xattrOffset) const;
+
+ void RemoveClear() const;
+ void EnsureClearAvailable() const;
+ static StreamableMemBlock *MakeClear(const StreamableMemBlock &rEncrypted);
+ void EncryptAttr(const StreamableMemBlock &rToEncrypt);
+
+private:
+ mutable StreamableMemBlock *mpClearAttributes;
+};
+
+#endif // BACKUPCLIENTFILEATTRIBUTES__H
+
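[A minimal end-to-end sketch of the interface declared above, assuming the key material (keyBytes/keyLen, secret/secretLen) has been loaded elsewhere; paths and variable names are illustrative only:

// The static keys must be set once, before any attributes are read or hashed
BackupClientFileAttributes::SetBlowfishKey(keyBytes, keyLen);
BackupClientFileAttributes::SetAttributeHashSecret(secret, secretLen);

// Read (and encrypt) the attributes of a local file
BackupClientFileAttributes attr;
attr.ReadAttributes("/path/to/source");

// ... the encrypted block can now be streamed to the store and back ...

// Later, apply a received attribute block to a restored file
BackupClientFileAttributes restored(attr);	// or construct from a StreamableMemBlock
restored.WriteAttributes("/path/to/restored/file");
]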
diff --git a/lib/backupstore/BackupCommands.cpp b/lib/backupstore/BackupCommands.cpp
new file mode 100644
index 00000000..318ce55a
--- /dev/null
+++ b/lib/backupstore/BackupCommands.cpp
@@ -0,0 +1,970 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupCommands.cpp
+// Purpose: Implement commands for the Backup store protocol
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <set>
+#include <sstream>
+
+#include "autogen_BackupProtocol.h"
+#include "autogen_RaidFileException.h"
+#include "BackupConstants.h"
+#include "BackupStoreContext.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreException.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreInfo.h"
+#include "BufferedStream.h"
+#include "CollectInBufferStream.h"
+#include "FileStream.h"
+#include "InvisibleTempFileStream.h"
+#include "RaidFileController.h"
+#include "StreamableMemBlock.h"
+
+#include "MemLeakFindOn.h"
+
+#define PROTOCOL_ERROR(code) \
+ std::auto_ptr<BackupProtocolMessage>(new BackupProtocolError( \
+ BackupProtocolError::ErrorType, \
+ BackupProtocolError::code));
+
+#define CHECK_PHASE(phase) \
+ if(rContext.GetPhase() != BackupStoreContext::phase) \
+ { \
+ BOX_ERROR("Received command " << ToString() << " " \
+ "in wrong protocol phase " << rContext.GetPhaseName() << ", " \
+ "expected in " #phase); \
+ return PROTOCOL_ERROR(Err_NotInRightProtocolPhase); \
+ }
+
+#define CHECK_WRITEABLE_SESSION \
+ if(rContext.SessionIsReadOnly()) \
+ { \
+ BOX_ERROR("Received command " << ToString() << " " \
+ "in a read-only session"); \
+ return PROTOCOL_ERROR(Err_SessionReadOnly); \
+ }
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolVersion::DoCommand(Protocol &, BackupStoreContext &)
+// Purpose: Return the current version, or an error if the requested version isn't allowed
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolVersion::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Version)
+
+ // Correct version?
+ if(mVersion != BACKUP_STORE_SERVER_VERSION)
+ {
+ return PROTOCOL_ERROR(Err_WrongVersion);
+ }
+
+ // Mark the next phase
+ rContext.SetPhase(BackupStoreContext::Phase_Login);
+
+ // Return our version
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolVersion(BACKUP_STORE_SERVER_VERSION));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolLogin::DoCommand(Protocol &, BackupStoreContext &)
+//		Purpose: Log in a client, checking the certificate matches the account and taking a write lock if requested
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolLogin::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Login)
+
+	// Check given client ID against the ID in the certificate
+ // and that the client actually has an account on this machine
+ if(mClientID != rContext.GetClientID())
+ {
+ BOX_WARNING("Failed login from client ID " <<
+ BOX_FORMAT_ACCOUNT(mClientID) << ": "
+ "wrong certificate for this account");
+ return PROTOCOL_ERROR(Err_BadLogin);
+ }
+
+ if(!rContext.GetClientHasAccount())
+ {
+ BOX_WARNING("Failed login from client ID " <<
+ BOX_FORMAT_ACCOUNT(mClientID) << ": "
+ "no such account on this server");
+ return PROTOCOL_ERROR(Err_BadLogin);
+ }
+
+ // If we need to write, check that nothing else has got a write lock
+ if((mFlags & Flags_ReadOnly) != Flags_ReadOnly)
+ {
+ // See if the context will get the lock
+ if(!rContext.AttemptToGetWriteLock())
+ {
+ BOX_WARNING("Failed to get write lock for Client ID " <<
+ BOX_FORMAT_ACCOUNT(mClientID));
+ return PROTOCOL_ERROR(Err_CannotLockStoreForWriting);
+ }
+
+ // Debug: check we got the lock
+ ASSERT(!rContext.SessionIsReadOnly());
+ }
+
+ // Load the store info
+ rContext.LoadStoreInfo();
+
+ if(!rContext.GetBackupStoreInfo().IsAccountEnabled())
+ {
+ BOX_WARNING("Refused login from disabled client ID " <<
+ BOX_FORMAT_ACCOUNT(mClientID));
+ return PROTOCOL_ERROR(Err_DisabledAccount);
+ }
+
+ // Get the last client store marker
+ int64_t clientStoreMarker = rContext.GetClientStoreMarker();
+
+ // Mark the next phase
+ rContext.SetPhase(BackupStoreContext::Phase_Commands);
+
+ // Log login
+ BOX_NOTICE("Login from Client ID " <<
+ BOX_FORMAT_ACCOUNT(mClientID) << " "
+ "(name=" << rContext.GetAccountName() << "): " <<
+ (((mFlags & Flags_ReadOnly) != Flags_ReadOnly)
+ ?"Read/Write":"Read-only") << " from " <<
+ rContext.GetConnectionDetails());
+
+ // Get the usage info for reporting to the client
+ int64_t blocksUsed = 0, blocksSoftLimit = 0, blocksHardLimit = 0;
+ rContext.GetStoreDiscUsageInfo(blocksUsed, blocksSoftLimit, blocksHardLimit);
+
+ // Return success
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolLoginConfirmed(clientStoreMarker, blocksUsed, blocksSoftLimit, blocksHardLimit));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolFinished::DoCommand(Protocol &, BackupStoreContext &)
+// Purpose: Marks end of conversation (Protocol framework handles this)
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolFinished::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ BOX_NOTICE("Session finished for Client ID " <<
+ BOX_FORMAT_ACCOUNT(rContext.GetClientID()) << " "
+ "(name=" << rContext.GetAccountName() << ")");
+
+ // Let the context know about it
+ rContext.ReceivedFinishCommand();
+
+ // can be called in any phase
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolFinished);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolListDirectory::DoCommand(Protocol &, BackupStoreContext &)
+// Purpose: Command to list a directory
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolListDirectory::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Store the listing to a stream
+ std::auto_ptr<CollectInBufferStream> stream(new CollectInBufferStream);
+
+ try
+ {
+ // Ask the context for a directory
+ const BackupStoreDirectory &rdir(
+ rContext.GetDirectory(mObjectID));
+ rdir.WriteToStream(*stream, mFlagsMustBeSet,
+ mFlagsNotToBeSet, mSendAttributes,
+ false /* never send dependency info to the client */);
+ }
+ catch (RaidFileException &e)
+ {
+ if (e.GetSubType() == RaidFileException::RaidFileDoesntExist)
+ {
+ return PROTOCOL_ERROR(Err_DoesNotExist);
+ }
+ throw;
+ }
+
+ stream->SetForReading();
+
+ // Get the protocol to send the stream
+ rProtocol.SendStreamAfterCommand(static_cast< std::auto_ptr<IOStream> > (stream));
+
+ return std::auto_ptr<BackupProtocolMessage>(
+ new BackupProtocolSuccess(mObjectID));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolStoreFile::DoCommand(Protocol &, BackupStoreContext &)
+// Purpose: Command to store a file on the server
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolStoreFile::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ std::auto_ptr<BackupProtocolMessage> hookResult =
+ rContext.StartCommandHook(*this);
+ if(hookResult.get())
+ {
+ return hookResult;
+ }
+
+ // Check that the diff from file actually exists, if it's specified
+ if(mDiffFromFileID != 0)
+ {
+ if(!rContext.ObjectExists(mDiffFromFileID,
+ BackupStoreContext::ObjectExists_File))
+ {
+ return PROTOCOL_ERROR(Err_DiffFromFileDoesNotExist);
+ }
+ }
+
+ // A stream follows, which contains the file
+ std::auto_ptr<IOStream> filestream(rProtocol.ReceiveStream());
+
+ // Ask the context to store it
+ int64_t id = 0;
+ try
+ {
+ id = rContext.AddFile(*filestream, mDirectoryObjectID,
+ mModificationTime, mAttributesHash, mDiffFromFileID,
+ mFilename,
+ true /* mark files with same name as old versions */);
+ }
+ catch(BackupStoreException &e)
+ {
+ if(e.GetSubType() == BackupStoreException::AddedFileDoesNotVerify)
+ {
+ return PROTOCOL_ERROR(Err_FileDoesNotVerify);
+ }
+ else if(e.GetSubType() == BackupStoreException::AddedFileExceedsStorageLimit)
+ {
+ return PROTOCOL_ERROR(Err_StorageLimitExceeded);
+ }
+ else
+ {
+ throw;
+ }
+ }
+
+ // Tell the caller what the file ID was
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(id));
+}
+
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolGetObject::DoCommand(Protocol &, BackupStoreContext &)
+//		Purpose: Command to get an arbitrary object from the server
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolGetObject::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Check the object exists
+ if(!rContext.ObjectExists(mObjectID))
+ {
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(NoObject));
+ }
+
+ // Open the object
+ std::auto_ptr<IOStream> object(rContext.OpenObject(mObjectID));
+
+ // Stream it to the peer
+ rProtocol.SendStreamAfterCommand(object);
+
+ // Tell the caller what the file was
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolGetFile::DoCommand(Protocol &, BackupStoreContext &)
+//		Purpose: Command to get a file object from the server -- may have to do a bit of
+// work to get the object.
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolGetFile::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Check the objects exist
+ if(!rContext.ObjectExists(mObjectID)
+ || !rContext.ObjectExists(mInDirectory))
+ {
+ return PROTOCOL_ERROR(Err_DoesNotExist);
+ }
+
+ // Get the directory it's in
+ const BackupStoreDirectory &rdir(rContext.GetDirectory(mInDirectory));
+
+ // Find the object within the directory
+ BackupStoreDirectory::Entry *pfileEntry = rdir.FindEntryByID(mObjectID);
+ if(pfileEntry == 0)
+ {
+ return PROTOCOL_ERROR(Err_DoesNotExistInDirectory);
+ }
+
+ // The result
+ std::auto_ptr<IOStream> stream;
+
+ // Does this depend on anything?
+ if(pfileEntry->GetDependsNewer() != 0)
+ {
+ // File exists, but is a patch from a new version. Generate the older version.
+ std::vector<int64_t> patchChain;
+ int64_t id = mObjectID;
+ BackupStoreDirectory::Entry *en = 0;
+ do
+ {
+ patchChain.push_back(id);
+ en = rdir.FindEntryByID(id);
+ if(en == 0)
+ {
+ BOX_ERROR("Object " <<
+ BOX_FORMAT_OBJECTID(mObjectID) <<
+ " in dir " <<
+ BOX_FORMAT_OBJECTID(mInDirectory) <<
+ " for account " <<
+ BOX_FORMAT_ACCOUNT(rContext.GetClientID()) <<
+ " references object " <<
+ BOX_FORMAT_OBJECTID(id) <<
+ " which does not exist in dir");
+ return PROTOCOL_ERROR(Err_PatchConsistencyError);
+ }
+ id = en->GetDependsNewer();
+ }
+ while(en != 0 && id != 0);
+
+ // OK! The last entry in the chain is the full file, the others are patches back from it.
+ // Open the last one, which is the current from file
+ std::auto_ptr<IOStream> from(rContext.OpenObject(patchChain[patchChain.size() - 1]));
+
+ // Then, for each patch in the chain, do a combine
+ for(int p = ((int)patchChain.size()) - 2; p >= 0; --p)
+ {
+ // ID of patch
+ int64_t patchID = patchChain[p];
+
+ // Open it a couple of times
+ std::auto_ptr<IOStream> diff(rContext.OpenObject(patchID));
+ std::auto_ptr<IOStream> diff2(rContext.OpenObject(patchID));
+
+ // Choose a temporary filename for the result of the combination
+ std::ostringstream fs;
+ fs << rContext.GetStoreRoot() << ".recombinetemp." << p;
+ std::string tempFn =
+ RaidFileController::DiscSetPathToFileSystemPath(
+ rContext.GetStoreDiscSet(), fs.str(),
+ p + 16);
+
+ // Open the temporary file
+ std::auto_ptr<IOStream> combined;
+ try
+ {
+ {
+ // Write nastily to allow this to work with gcc 2.x
+ std::auto_ptr<IOStream> t(
+ new InvisibleTempFileStream(
+ tempFn.c_str(),
+ O_RDWR | O_CREAT |
+ O_EXCL | O_BINARY |
+ O_TRUNC));
+ combined = t;
+ }
+ }
+ catch(...)
+ {
+ // Make sure it goes
+ ::unlink(tempFn.c_str());
+ throw;
+ }
+
+ // Do the combining
+ BackupStoreFile::CombineFile(*diff, *diff2, *from, *combined);
+
+ // Move to the beginning of the combined file
+ combined->Seek(0, IOStream::SeekType_Absolute);
+
+ // Then shuffle round for the next go
+ if (from.get()) from->Close();
+ from = combined;
+ }
+
+ // Now, from contains a nice file to send to the client. Reorder it
+ {
+ // Write nastily to allow this to work with gcc 2.x
+ std::auto_ptr<IOStream> t(BackupStoreFile::ReorderFileToStreamOrder(from.get(), true /* take ownership */));
+ stream = t;
+ }
+
+ // Release from file to avoid double deletion
+ from.release();
+ }
+ else
+ {
+ // Simple case: file already exists on disc ready to go
+
+ // Open the object
+ std::auto_ptr<IOStream> object(rContext.OpenObject(mObjectID));
+ BufferedStream buf(*object);
+
+ // Verify it
+ if(!BackupStoreFile::VerifyEncodedFileFormat(buf))
+ {
+ return PROTOCOL_ERROR(Err_FileDoesNotVerify);
+ }
+
+ // Reset stream -- seek to beginning
+ object->Seek(0, IOStream::SeekType_Absolute);
+
+ // Reorder the stream/file into stream order
+ {
+ // Write nastily to allow this to work with gcc 2.x
+ std::auto_ptr<IOStream> t(BackupStoreFile::ReorderFileToStreamOrder(object.get(), true /* take ownership */));
+ stream = t;
+ }
+
+ // Object will be deleted when the stream is deleted,
+ // so can release the object auto_ptr here to avoid
+ // premature deletion
+ object.release();
+ }
+
+ // Stream the reordered stream to the peer
+ rProtocol.SendStreamAfterCommand(stream);
+
+ // Tell the caller what the file was
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
+}
+
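The chain-walking loop above is the subtle part of this command: the requested object may be stored as a reverse patch against a newer version, so the server follows the DependsNewer links until it reaches the one complete file, then combines patches back down to the requested ID with BackupStoreFile::CombineFile(). As a reference, here is a minimal, self-contained sketch of just the chain-building step, using hypothetical stand-in types rather than the real BackupStoreDirectory API:

    #include <cstdint>
    #include <map>
    #include <stdexcept>
    #include <vector>

    // Hypothetical stand-in: each directory entry records which newer object it
    // is a patch against (0 means it is a complete file, not a patch).
    struct EntryInfo { int64_t dependsNewer; };

    // Build the chain requestedID -> ... -> complete file, mirroring the loop in
    // BackupProtocolGetFile::DoCommand above. The last ID in the returned vector
    // is the full file; the earlier IDs are patches applied newest-to-oldest.
    std::vector<int64_t> BuildPatchChain(int64_t requestedID,
        const std::map<int64_t, EntryInfo> &rDir)
    {
        std::vector<int64_t> chain;
        int64_t id = requestedID;
        while(id != 0)
        {
            std::map<int64_t, EntryInfo>::const_iterator i = rDir.find(id);
            if(i == rDir.end())
            {
                // Corresponds to the Err_PatchConsistencyError case above: a
                // dependency points at an object missing from the directory.
                throw std::runtime_error("patch chain references a missing object");
            }
            chain.push_back(id);
            id = i->second.dependsNewer;
        }
        return chain;
    }
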
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolCreateDirectory::DoCommand(Protocol &, BackupStoreContext &)
+// Purpose: Create directory command
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolCreateDirectory::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Get the stream containing the attributes
+ std::auto_ptr<IOStream> attrstream(rProtocol.ReceiveStream());
+ // Collect the attributes -- do this now so no matter what the outcome,
+ // the data has been absorbed.
+ StreamableMemBlock attr;
+ attr.Set(*attrstream, rProtocol.GetTimeout());
+
+ // Check to see if the hard limit has been exceeded
+ if(rContext.HardLimitExceeded())
+ {
+ // Won't allow creation if the limit has been exceeded
+ return PROTOCOL_ERROR(Err_StorageLimitExceeded);
+ }
+
+ bool alreadyExists = false;
+ int64_t id = rContext.AddDirectory(mContainingDirectoryID, mDirectoryName, attr, mAttributesModTime, alreadyExists);
+
+ if(alreadyExists)
+ {
+ return PROTOCOL_ERROR(Err_DirectoryAlreadyExists);
+ }
+
+ // Tell the caller what the new directory's ID is
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(id));
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolChangeDirAttributes::DoCommand(Protocol &, BackupStoreContext &)
+// Purpose: Change attributes on directory
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolChangeDirAttributes::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Get the stream containing the attributes
+ std::auto_ptr<IOStream> attrstream(rProtocol.ReceiveStream());
+ // Collect the attributes -- do this now so no matter what the outcome,
+ // the data has been absorbed.
+ StreamableMemBlock attr;
+ attr.Set(*attrstream, rProtocol.GetTimeout());
+
+ // Get the context to do its magic
+ rContext.ChangeDirAttributes(mObjectID, attr, mAttributesModTime);
+
+ // Tell the caller what the directory was
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolSetReplacementFileAttributes::DoCommand(Protocol &, BackupStoreContext &)
+// Purpose: Set replacement attributes on a file
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolSetReplacementFileAttributes::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Get the stream containing the attributes
+ std::auto_ptr<IOStream> attrstream(rProtocol.ReceiveStream());
+ // Collect the attributes -- do this now so no matter what the outcome,
+ // the data has been absorbed.
+ StreamableMemBlock attr;
+ attr.Set(*attrstream, rProtocol.GetTimeout());
+
+ // Get the context to do its magic
+ int64_t objectID = 0;
+ if(!rContext.ChangeFileAttributes(mFilename, mInDirectory, attr, mAttributesHash, objectID))
+ {
+ // Didn't exist
+ return PROTOCOL_ERROR(Err_DoesNotExist);
+ }
+
+ // Tell the caller what the file was
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(objectID));
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolDeleteFile::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Delete a file
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolDeleteFile::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Context handles this
+ int64_t objectID = 0;
+ rContext.DeleteFile(mFilename, mInDirectory, objectID);
+
+ // return the object ID or zero for not found
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(objectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolUndeleteFile::DoCommand(
+// BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Undelete a file
+// Created: 2008-09-12
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolUndeleteFile::DoCommand(
+ BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Context handles this
+ bool result = rContext.UndeleteFile(mObjectID, mInDirectory);
+
+ // return the object ID or zero for not found
+ return std::auto_ptr<BackupProtocolMessage>(
+ new BackupProtocolSuccess(result ? mObjectID : 0));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolDeleteDirectory::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Delete a directory
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolDeleteDirectory::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Check it's not asking for the root directory to be deleted
+ if(mObjectID == BACKUPSTORE_ROOT_DIRECTORY_ID)
+ {
+ return PROTOCOL_ERROR(Err_CannotDeleteRoot);
+ }
+
+ // Context handles this
+ try
+ {
+ rContext.DeleteDirectory(mObjectID);
+ }
+ catch (BackupStoreException &e)
+ {
+ if(e.GetSubType() == BackupStoreException::MultiplyReferencedObject)
+ {
+ return PROTOCOL_ERROR(Err_MultiplyReferencedObject);
+ }
+
+ throw;
+ }
+
+ // return the object ID
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolUndeleteDirectory::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Undelete a directory
+// Created: 23/11/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolUndeleteDirectory::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Check it's not asking for the root directory to be undeleted
+ if(mObjectID == BACKUPSTORE_ROOT_DIRECTORY_ID)
+ {
+ return PROTOCOL_ERROR(Err_CannotDeleteRoot);
+ }
+
+ // Context handles this
+ rContext.DeleteDirectory(mObjectID, true /* undelete */);
+
+ // return the object ID
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolSetClientStoreMarker::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Command to set the client's store marker
+// Created: 2003/10/29
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolSetClientStoreMarker::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Set the marker
+ rContext.SetClientStoreMarker(mClientStoreMarker);
+
+ // return store marker set
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mClientStoreMarker));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolMoveObject::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Command to move an object from one directory to another
+// Created: 2003/11/12
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolMoveObject::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Let context do this, but modify error reporting on exceptions...
+ try
+ {
+ rContext.MoveObject(mObjectID, mMoveFromDirectory, mMoveToDirectory,
+ mNewFilename, (mFlags & Flags_MoveAllWithSameName) == Flags_MoveAllWithSameName,
+ (mFlags & Flags_AllowMoveOverDeletedObject) == Flags_AllowMoveOverDeletedObject);
+ }
+ catch(BackupStoreException &e)
+ {
+ if(e.GetSubType() == BackupStoreException::CouldNotFindEntryInDirectory)
+ {
+ return PROTOCOL_ERROR(Err_DoesNotExist);
+ }
+ else if(e.GetSubType() == BackupStoreException::NameAlreadyExistsInDirectory)
+ {
+ return PROTOCOL_ERROR(Err_TargetNameExists);
+ }
+ else
+ {
+ throw;
+ }
+ }
+
+ // Return the object ID
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolGetObjectName::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Command to find the name of an object
+// Created: 12/11/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolGetObjectName::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Create a stream for the list of filenames
+ std::auto_ptr<CollectInBufferStream> stream(new CollectInBufferStream);
+
+ // Object and directory IDs
+ int64_t objectID = mObjectID;
+ int64_t dirID = mContainingDirectoryID;
+
+ // Data to return in the reply
+ int32_t numNameElements = 0;
+ int16_t objectFlags = 0;
+ int64_t modTime = 0;
+ uint64_t attrModHash = 0;
+ bool haveModTimes = false;
+
+ do
+ {
+ // Check the directory really exists
+ if(!rContext.ObjectExists(dirID, BackupStoreContext::ObjectExists_Directory))
+ {
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolObjectName(BackupProtocolObjectName::NumNameElements_ObjectDoesntExist, 0, 0, 0));
+ }
+
+ // Load up the directory
+ const BackupStoreDirectory &rdir(rContext.GetDirectory(dirID));
+
+ // Find the element in this directory and store its name
+ if(objectID != ObjectID_DirectoryOnly)
+ {
+ const BackupStoreDirectory::Entry *en = rdir.FindEntryByID(objectID);
+
+ // If this can't be found, then there is a problem... tell the caller it can't be found
+ if(en == 0)
+ {
+ // Abort!
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolObjectName(BackupProtocolObjectName::NumNameElements_ObjectDoesntExist, 0, 0, 0));
+ }
+
+ // Store flags?
+ if(objectFlags == 0)
+ {
+ objectFlags = en->GetFlags();
+ }
+
+ // Store modification times?
+ if(!haveModTimes)
+ {
+ modTime = en->GetModificationTime();
+ attrModHash = en->GetAttributesHash();
+ haveModTimes = true;
+ }
+
+ // Store the name in the stream
+ en->GetName().WriteToStream(*stream);
+
+ // Count of name elements
+ ++numNameElements;
+ }
+
+ // Setup for next time round
+ objectID = dirID;
+ dirID = rdir.GetContainerID();
+
+ } while(objectID != 0 && objectID != BACKUPSTORE_ROOT_DIRECTORY_ID);
+
+ // Stream to send?
+ if(numNameElements > 0)
+ {
+ // Get the stream ready to go
+ stream->SetForReading();
+ // Tell the protocol to send the stream
+ rProtocol.SendStreamAfterCommand(static_cast< std::auto_ptr<IOStream> >(stream));
+ }
+
+ // Make reply
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolObjectName(numNameElements, modTime, attrModHash, objectFlags));
+}
+
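The ObjectName reply streams one encoded filename per name element, starting with the requested object and working up towards the root, so a client rebuilds the path by reversing the order in which the elements arrive. A hypothetical client-side helper (assuming the names have already been decoded to plain strings) might look like this:

    #include <string>
    #include <vector>

    // Hypothetical helper: rLeafToRoot holds the decoded name elements in the
    // order they arrive on the stream (leaf first), so the path is built by
    // walking them in reverse.
    std::string JoinNameElements(const std::vector<std::string> &rLeafToRoot)
    {
        std::string path;
        for(std::vector<std::string>::const_reverse_iterator
            i = rLeafToRoot.rbegin(); i != rLeafToRoot.rend(); ++i)
        {
            if(!path.empty()) path += '/';
            path += *i;
        }
        return path;
    }
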
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolGetBlockIndexByID::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Get the block index from a file, by ID
+// Created: 19/1/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolGetBlockIndexByID::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Open the file
+ std::auto_ptr<IOStream> stream(rContext.OpenObject(mObjectID));
+
+ // Move the file pointer to the block index
+ BackupStoreFile::MoveStreamPositionToBlockIndex(*stream);
+
+ // Return the stream to the client
+ rProtocol.SendStreamAfterCommand(stream);
+
+ // Return the object ID
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolGetBlockIndexByName::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Get the block index from a file, by name within a directory
+// Created: 19/1/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolGetBlockIndexByName::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Get the directory
+ const BackupStoreDirectory &dir(rContext.GetDirectory(mInDirectory));
+
+ // Find the latest object ID within it which has the same name
+ int64_t objectID = 0;
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
+ {
+ if(en->GetName() == mFilename)
+ {
+ // Store the ID, if it's a newer ID than the last one
+ if(en->GetObjectID() > objectID)
+ {
+ objectID = en->GetObjectID();
+ }
+ }
+ }
+
+ // Found anything?
+ if(objectID == 0)
+ {
+ // No... return a zero object ID
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(0));
+ }
+
+ // Open the file
+ std::auto_ptr<IOStream> stream(rContext.OpenObject(objectID));
+
+ // Move the file pointer to the block index
+ BackupStoreFile::MoveStreamPositionToBlockIndex(*stream);
+
+ // Return the stream to the client
+ rProtocol.SendStreamAfterCommand(stream);
+
+ // Return the object ID
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(objectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolGetAccountUsage::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Return the amount of disc space used
+// Created: 19/4/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolGetAccountUsage::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Get store info from context
+ const BackupStoreInfo &rinfo(rContext.GetBackupStoreInfo());
+
+ // Find block size
+ RaidFileController &rcontroller(RaidFileController::GetController());
+ RaidFileDiscSet &rdiscSet(rcontroller.GetDiscSet(rinfo.GetDiscSetNumber()));
+
+ // Return info
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolAccountUsage(
+ rinfo.GetBlocksUsed(),
+ rinfo.GetBlocksInOldFiles(),
+ rinfo.GetBlocksInDeletedFiles(),
+ rinfo.GetBlocksInDirectories(),
+ rinfo.GetBlocksSoftLimit(),
+ rinfo.GetBlocksHardLimit(),
+ rdiscSet.GetBlockSize()
+ ));
+}
+
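Every size in the AccountUsage reply is a count of blocks; the disc set's block size is returned in the same message so that callers can derive byte figures locally. A small hypothetical example of that client-side conversion:

    #include <cstdint>

    // Hypothetical client-side summary derived from an AccountUsage reply:
    // the server reports blocks, so bytes and percentages are computed here.
    struct UsageSummary
    {
        int64_t usedBytes;
        int percentOfSoftLimit;
    };

    inline UsageSummary SummariseUsage(int64_t blocksUsed,
        int64_t blocksSoftLimit, int64_t blockSize)
    {
        UsageSummary summary;
        summary.usedBytes = blocksUsed * blockSize;
        summary.percentOfSoftLimit = (blocksSoftLimit == 0) ? 0 :
            (int)((blocksUsed * 100) / blocksSoftLimit);
        return summary;
    }
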
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolGetIsAlive::DoCommand(BackupProtocolReplyable &, BackupStoreContext &)
+// Purpose: Check that the connection is still alive (no-op)
+// Created: 19/4/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupProtocolMessage> BackupProtocolGetIsAlive::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
+{
+ CHECK_PHASE(Phase_Commands)
+
+ //
+ // NOOP
+ //
+ return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolIsAlive());
+}
diff --git a/lib/backupstore/BackupConstants.h b/lib/backupstore/BackupConstants.h
new file mode 100644
index 00000000..19d06a15
--- /dev/null
+++ b/lib/backupstore/BackupConstants.h
@@ -0,0 +1,21 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupConstants.h
+// Purpose: Constants for the backup server and client
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCONSTANTS__H
+#define BACKUPCONSTANTS__H
+
+// 15 minute timeout (in milliseconds)
+#define BACKUP_STORE_TIMEOUT (15*60*1000)
+
+// Should the store daemon convert files to Raid immediately?
+#define BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY true
+
+#endif // BACKUPCONSTANTS__H
+
+
diff --git a/lib/backupstore/BackupStoreAccountDatabase.cpp b/lib/backupstore/BackupStoreAccountDatabase.cpp
index 72a813d5..ec44de0e 100644
--- a/lib/backupstore/BackupStoreAccountDatabase.cpp
+++ b/lib/backupstore/BackupStoreAccountDatabase.cpp
@@ -40,7 +40,11 @@ public:
// Created: 2003/08/20
//
// --------------------------------------------------------------------------
+<<<<<<< HEAD
BackupStoreAccountDatabase::BackupStoreAccountDatabase(const char *Filename)
+=======
+BackupStoreAccountDatabase::BackupStoreAccountDatabase(const std::string& Filename)
+>>>>>>> 0.12
: pImpl(new _BackupStoreAccountDatabase)
{
pImpl->mFilename = Filename;
@@ -123,7 +127,11 @@ BackupStoreAccountDatabase::Entry::~Entry()
// Created: 2003/08/21
//
// --------------------------------------------------------------------------
+<<<<<<< HEAD
std::auto_ptr<BackupStoreAccountDatabase> BackupStoreAccountDatabase::Read(const char *Filename)
+=======
+std::auto_ptr<BackupStoreAccountDatabase> BackupStoreAccountDatabase::Read(const std::string& Filename)
+>>>>>>> 0.12
{
// Database object to use
std::auto_ptr<BackupStoreAccountDatabase> db(new BackupStoreAccountDatabase(Filename));
diff --git a/lib/backupstore/BackupStoreAccountDatabase.h b/lib/backupstore/BackupStoreAccountDatabase.h
index 79573242..cb19b01b 100644
--- a/lib/backupstore/BackupStoreAccountDatabase.h
+++ b/lib/backupstore/BackupStoreAccountDatabase.h
@@ -31,11 +31,19 @@ public:
friend class _BackupStoreAccountDatabase; // to stop compiler warnings
~BackupStoreAccountDatabase();
private:
+<<<<<<< HEAD
BackupStoreAccountDatabase(const char *Filename);
BackupStoreAccountDatabase(const BackupStoreAccountDatabase &);
public:
static std::auto_ptr<BackupStoreAccountDatabase> Read(const char *Filename);
+=======
+ BackupStoreAccountDatabase(const std::string& Filename);
+ BackupStoreAccountDatabase(const BackupStoreAccountDatabase &);
+public:
+
+ static std::auto_ptr<BackupStoreAccountDatabase> Read(const std::string& Filename);
+>>>>>>> 0.12
void Write();
class Entry
diff --git a/lib/backupstore/BackupStoreAccounts.cpp b/lib/backupstore/BackupStoreAccounts.cpp
index 5c7e4d38..8cb23a6a 100644
--- a/lib/backupstore/BackupStoreAccounts.cpp
+++ b/lib/backupstore/BackupStoreAccounts.cpp
@@ -11,6 +11,7 @@
#include <stdio.h>
+<<<<<<< HEAD
#include "BoxPortsAndFiles.h"
#include "BackupStoreAccounts.h"
#include "BackupStoreAccountDatabase.h"
@@ -19,6 +20,18 @@
#include "BackupStoreInfo.h"
#include "BackupStoreDirectory.h"
#include "BackupStoreConstants.h"
+=======
+#include "BackupStoreAccounts.h"
+#include "BackupStoreAccountDatabase.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreException.h"
+#include "BackupStoreInfo.h"
+#include "BackupStoreRefCountDatabase.h"
+#include "BoxPortsAndFiles.h"
+#include "RaidFileWrite.h"
+#include "StoreStructure.h"
+>>>>>>> 0.12
#include "UnixUser.h"
#include "MemLeakFindOn.h"
@@ -55,8 +68,13 @@ BackupStoreAccounts::~BackupStoreAccounts()
// Function
// Name: BackupStoreAccounts::Create(int32_t, int, int64_t, int64_t, const std::string &)
// Purpose: Create a new account on the specified disc set.
+<<<<<<< HEAD
// If rAsUsername is not empty, then the account information will be written under the
// username specified.
+=======
+// If rAsUsername is not empty, then the account information will be written under the
+// username specified.
+>>>>>>> 0.12
// Created: 2003/08/21
//
// --------------------------------------------------------------------------
@@ -102,6 +120,10 @@ void BackupStoreAccounts::Create(int32_t ID, int DiscSet, int64_t SizeSoftLimit,
std::auto_ptr<BackupStoreInfo> info(BackupStoreInfo::Load(ID, dirName, DiscSet, false /* ReadWrite */));
info->ChangeBlocksUsed(rootDirSize);
info->ChangeBlocksInDirectories(rootDirSize);
+<<<<<<< HEAD
+=======
+ info->AdjustNumDirectories(1);
+>>>>>>> 0.12
// Save it back
info->Save();
@@ -167,4 +189,38 @@ bool BackupStoreAccounts::AccountExists(int32_t ID)
return mrDatabase.EntryExists(ID);
}
+<<<<<<< HEAD
+
+=======
+void BackupStoreAccounts::LockAccount(int32_t ID, NamedLock& rNamedLock)
+{
+ const BackupStoreAccountDatabase::Entry &en(mrDatabase.GetEntry(ID));
+ std::string rootDir = MakeAccountRootDir(ID, en.GetDiscSet());
+ int discSet = en.GetDiscSet();
+ std::string writeLockFilename;
+ StoreStructure::MakeWriteLockFilename(rootDir, discSet, writeLockFilename);
+
+ bool gotLock = false;
+ int triesLeft = 8;
+ do
+ {
+ gotLock = rNamedLock.TryAndGetLock(writeLockFilename,
+ 0600 /* restrictive file permissions */);
+
+ if(!gotLock)
+ {
+ --triesLeft;
+ ::sleep(1);
+ }
+ }
+ while (!gotLock && triesLeft > 0);
+
+ if (!gotLock)
+ {
+ THROW_EXCEPTION_MESSAGE(BackupStoreException,
+ CouldNotLockStoreAccount, "Failed to get exclusive "
+ "lock on account " << ID);
+ }
+}
+>>>>>>> 0.12
diff --git a/lib/backupstore/BackupStoreAccounts.h b/lib/backupstore/BackupStoreAccounts.h
index 224d7353..e0e420bb 100644
--- a/lib/backupstore/BackupStoreAccounts.h
+++ b/lib/backupstore/BackupStoreAccounts.h
@@ -13,6 +13,10 @@
#include <string>
#include "BackupStoreAccountDatabase.h"
+<<<<<<< HEAD
+=======
+#include "NamedLock.h"
+>>>>>>> 0.12
// --------------------------------------------------------------------------
//
@@ -31,7 +35,12 @@ private:
BackupStoreAccounts(const BackupStoreAccounts &rToCopy);
public:
+<<<<<<< HEAD
void Create(int32_t ID, int DiscSet, int64_t SizeSoftLimit, int64_t SizeHardLimit, const std::string &rAsUsername);
+=======
+ void Create(int32_t ID, int DiscSet, int64_t SizeSoftLimit,
+ int64_t SizeHardLimit, const std::string &rAsUsername);
+>>>>>>> 0.12
bool AccountExists(int32_t ID);
void GetAccountRoot(int32_t ID, std::string &rRootDirOut, int &rDiscSetOut) const;
@@ -40,6 +49,10 @@ public:
{
return MakeAccountRootDir(rEntry.GetID(), rEntry.GetDiscSet());
}
+<<<<<<< HEAD
+=======
+ void LockAccount(int32_t ID, NamedLock& rNamedLock);
+>>>>>>> 0.12
private:
static std::string MakeAccountRootDir(int32_t ID, int DiscSet);
diff --git a/lib/backupstore/BackupStoreCheck.cpp b/lib/backupstore/BackupStoreCheck.cpp
index 7598094e..79a61a77 100644
--- a/lib/backupstore/BackupStoreCheck.cpp
+++ b/lib/backupstore/BackupStoreCheck.cpp
@@ -11,6 +11,7 @@
#include <stdio.h>
#include <string.h>
+<<<<<<< HEAD
#include <unistd.h>
#include "BackupStoreCheck.h"
@@ -22,6 +23,26 @@
#include "BackupStoreFile.h"
#include "BackupStoreDirectory.h"
#include "BackupStoreConstants.h"
+=======
+
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+#include "autogen_BackupStoreException.h"
+#include "BackupStoreCheck.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreObjectMagic.h"
+#include "RaidFileController.h"
+#include "RaidFileException.h"
+#include "RaidFileRead.h"
+#include "RaidFileUtil.h"
+#include "RaidFileWrite.h"
+#include "StoreStructure.h"
+#include "Utils.h"
+>>>>>>> 0.12
#include "MemLeakFindOn.h"
@@ -47,9 +68,20 @@ BackupStoreCheck::BackupStoreCheck(const std::string &rStoreRoot, int DiscSetNum
mLostDirNameSerial(0),
mLostAndFoundDirectoryID(0),
mBlocksUsed(0),
+<<<<<<< HEAD
mBlocksInOldFiles(0),
mBlocksInDeletedFiles(0),
mBlocksInDirectories(0)
+=======
+ mBlocksInCurrentFiles(0),
+ mBlocksInOldFiles(0),
+ mBlocksInDeletedFiles(0),
+ mBlocksInDirectories(0),
+ mNumFiles(0),
+ mNumOldFiles(0),
+ mNumDeletedFiles(0),
+ mNumDirectories(0)
+>>>>>>> 0.12
{
}
@@ -73,12 +105,18 @@ BackupStoreCheck::~BackupStoreCheck()
//
// Function
// Name: BackupStoreCheck::Check()
+<<<<<<< HEAD
// Purpose: Perform the check on the given account
+=======
+// Purpose: Perform the check on the given account. You need to
+// hold a lock on the account before calling this!
+>>>>>>> 0.12
// Created: 21/4/04
//
// --------------------------------------------------------------------------
void BackupStoreCheck::Check()
{
+<<<<<<< HEAD
// Lock the account
{
std::string writeLockFilename;
@@ -107,6 +145,11 @@ void BackupStoreCheck::Check()
THROW_EXCEPTION(BackupStoreException, CouldNotLockStoreAccount)
}
}
+=======
+ std::string writeLockFilename;
+ StoreStructure::MakeWriteLockFilename(mStoreRoot, mDiscSetNumber, writeLockFilename);
+ ASSERT(FileExists(writeLockFilename));
+>>>>>>> 0.12
if(!mQuiet && mFixErrors)
{
@@ -296,6 +339,36 @@ int64_t BackupStoreCheck::CheckObjectsScanDir(int64_t StartID, int Level, const
// Read in all the directories, and recurse downwards
{
+<<<<<<< HEAD
+=======
+ // If any of the directories is missing, create it.
+ RaidFileController &rcontroller(RaidFileController::GetController());
+ RaidFileDiscSet rdiscSet(rcontroller.GetDiscSet(mDiscSetNumber));
+
+ if(!rdiscSet.IsNonRaidSet())
+ {
+ unsigned int numDiscs = rdiscSet.size();
+
+ for(unsigned int l = 0; l < numDiscs; ++l)
+ {
+ // build name
+ std::string dn(rdiscSet[l] + DIRECTORY_SEPARATOR + rDirName);
+ struct stat st;
+
+ if(stat(dn.c_str(), &st) != 0 && errno == ENOENT)
+ {
+ if(mkdir(dn.c_str(), 0755) != 0)
+ {
+ THROW_SYS_FILE_ERROR("Failed to "
+ "create missing RaidFile "
+ "directory", dn,
+ RaidFileException, OSError);
+ }
+ }
+ }
+ }
+
+>>>>>>> 0.12
std::vector<std::string> dirs;
RaidFileRead::ReadDirectoryContents(mDiscSetNumber, rDirName,
RaidFileRead::DirReadType_DirsOnly, dirs);
@@ -318,7 +391,11 @@ int64_t BackupStoreCheck::CheckObjectsScanDir(int64_t StartID, int Level, const
}
else
{
+<<<<<<< HEAD
BOX_WARNING("Spurious or invalid directory " <<
+=======
+ BOX_ERROR("Spurious or invalid directory " <<
+>>>>>>> 0.12
rDirName << DIRECTORY_SEPARATOR <<
(*i) << " found, " <<
(mFixErrors?"deleting":"delete manually"));
@@ -335,7 +412,12 @@ int64_t BackupStoreCheck::CheckObjectsScanDir(int64_t StartID, int Level, const
//
// Function
// Name: BackupStoreCheck::CheckObjectsDir(int64_t)
+<<<<<<< HEAD
// Purpose: Check all the files within this directory which has the given starting ID.
+=======
+// Purpose: Check all the files within this directory which has
+// the given starting ID.
+>>>>>>> 0.12
// Created: 22/4/04
//
// --------------------------------------------------------------------------
@@ -383,6 +465,7 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
// Filename is valid, mark as existing
idsPresent[n] = true;
}
+<<<<<<< HEAD
else
{
// info file in root dir is OK!
@@ -390,12 +473,31 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
{
fileOK = false;
}
+=======
+ // No other files should be present in subdirectories
+ else if(StartID != 0)
+ {
+ fileOK = false;
+ }
+ // info and refcount databases are OK in the root directory
+ else if(*i == "info" || *i == "refcount.db")
+ {
+ fileOK = true;
+ }
+ else
+ {
+ fileOK = false;
+>>>>>>> 0.12
}
if(!fileOK)
{
// Unexpected or bad file, delete it
+<<<<<<< HEAD
BOX_WARNING("Spurious file " << dirName <<
+=======
+ BOX_ERROR("Spurious file " << dirName <<
+>>>>>>> 0.12
DIRECTORY_SEPARATOR << (*i) << " found" <<
(mFixErrors?", deleting":""));
++mNumberErrorsFound;
@@ -418,7 +520,11 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
if(!CheckAndAddObject(StartID | i, dirName + leaf))
{
// File was bad, delete it
+<<<<<<< HEAD
BOX_WARNING("Corrupted file " << dirName <<
+=======
+ BOX_ERROR("Corrupted file " << dirName <<
+>>>>>>> 0.12
leaf << " found" <<
(mFixErrors?", deleting":""));
++mNumberErrorsFound;
@@ -436,6 +542,7 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
// --------------------------------------------------------------------------
//
// Function
+<<<<<<< HEAD
// Name: BackupStoreCheck::CheckAndAddObject(int64_t, const std::string &)
// Purpose: Check a specific object and add it to the list if it's OK -- if
// there are any errors with the reading, return false and it'll be deleted.
@@ -443,6 +550,18 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
//
// --------------------------------------------------------------------------
bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID, const std::string &rFilename)
+=======
+// Name: BackupStoreCheck::CheckAndAddObject(int64_t,
+// const std::string &)
+// Purpose: Check a specific object and add it to the list
+// if it's OK. If there are any errors with the
+// reading, return false and it'll be deleted.
+// Created: 21/4/04
+//
+// --------------------------------------------------------------------------
+bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID,
+ const std::string &rFilename)
+>>>>>>> 0.12
{
// Info on object...
bool isFile = true;
@@ -452,10 +571,19 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID, const std::string &rF
try
{
// Open file
+<<<<<<< HEAD
std::auto_ptr<RaidFileRead> file(RaidFileRead::Open(mDiscSetNumber, rFilename));
size = file->GetDiscUsageInBlocks();
// Read in first four bytes -- don't have to worry about retrying if not all bytes read as is RaidFile
+=======
+ std::auto_ptr<RaidFileRead> file(
+ RaidFileRead::Open(mDiscSetNumber, rFilename));
+ size = file->GetDiscUsageInBlocks();
+
+ // Read in first four bytes -- don't have to worry about
+ // retrying if not all bytes read as is RaidFile
+>>>>>>> 0.12
uint32_t signature;
if(file->Read(&signature, sizeof(signature)) != sizeof(signature))
{
@@ -486,6 +614,7 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID, const std::string &rF
return false;
break;
}
+<<<<<<< HEAD
// Add to usage counts
mBlocksUsed += size;
@@ -493,6 +622,8 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID, const std::string &rF
{
mBlocksInDirectories += size;
}
+=======
+>>>>>>> 0.12
}
catch(...)
{
@@ -505,10 +636,64 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID, const std::string &rF
{
return false;
}
+<<<<<<< HEAD
// Add to list of IDs known about
AddID(ObjectID, containerID, size, isFile);
+=======
+
+ // Add to list of IDs known about
+ AddID(ObjectID, containerID, size, isFile);
+
+ // Add to usage counts
+ mBlocksUsed += size;
+ if(!isFile)
+ {
+ mBlocksInDirectories += size;
+ }
+
+ // If it looks like a good object, and it's non-RAID, and
+ // this is a RAID set, then convert it to RAID.
+
+ RaidFileController &rcontroller(RaidFileController::GetController());
+ RaidFileDiscSet rdiscSet(rcontroller.GetDiscSet(mDiscSetNumber));
+ if(!rdiscSet.IsNonRaidSet())
+ {
+ // See if the file exists
+ RaidFileUtil::ExistType existance =
+ RaidFileUtil::RaidFileExists(rdiscSet, rFilename);
+ if(existance == RaidFileUtil::NonRaid)
+ {
+ BOX_WARNING("Found non-RAID write file in RAID set" <<
+ (mFixErrors?", transforming to RAID: ":"") <<
+ (mFixErrors?rFilename:""));
+ if(mFixErrors)
+ {
+ RaidFileWrite write(mDiscSetNumber, rFilename);
+ write.TransformToRaidStorage();
+ }
+ }
+ else if(existance == RaidFileUtil::AsRaidWithMissingReadable)
+ {
+ BOX_WARNING("Found damaged but repairable RAID file" <<
+ (mFixErrors?", repairing: ":"") <<
+ (mFixErrors?rFilename:""));
+ if(mFixErrors)
+ {
+ std::auto_ptr<RaidFileRead> read(
+ RaidFileRead::Open(mDiscSetNumber,
+ rFilename));
+ RaidFileWrite write(mDiscSetNumber, rFilename);
+ write.Open(true /* overwrite */);
+ read->CopyStreamTo(write);
+ read.reset();
+ write.Commit(true /* transform to RAID */);
+ }
+ }
+ }
+
+>>>>>>> 0.12
// Report success
return true;
}
@@ -518,13 +703,23 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID, const std::string &rF
//
// Function
// Name: BackupStoreCheck::CheckFile(int64_t, IOStream &)
+<<<<<<< HEAD
// Purpose: Do check on file, return original container ID if OK, or -1 on error
+=======
+// Purpose: Do check on file, return original container ID
+// if OK, or -1 on error
+>>>>>>> 0.12
// Created: 22/4/04
//
// --------------------------------------------------------------------------
int64_t BackupStoreCheck::CheckFile(int64_t ObjectID, IOStream &rStream)
{
+<<<<<<< HEAD
// Check that it's not the root directory ID. Having a file as the root directory would be bad.
+=======
+ // Check that it's not the root directory ID. Having a file as
+ // the root directory would be bad.
+>>>>>>> 0.12
if(ObjectID == BACKUPSTORE_ROOT_DIRECTORY_ID)
{
// Get that dodgy thing deleted!
@@ -534,7 +729,12 @@ int64_t BackupStoreCheck::CheckFile(int64_t ObjectID, IOStream &rStream)
// Check the format of the file, and obtain the container ID
int64_t originalContainerID = -1;
+<<<<<<< HEAD
if(!BackupStoreFile::VerifyEncodedFileFormat(rStream, 0 /* don't want diffing from ID */,
+=======
+ if(!BackupStoreFile::VerifyEncodedFileFormat(rStream,
+ 0 /* don't want diffing from ID */,
+>>>>>>> 0.12
&originalContainerID))
{
// Didn't verify
@@ -549,7 +749,12 @@ int64_t BackupStoreCheck::CheckFile(int64_t ObjectID, IOStream &rStream)
//
// Function
// Name: BackupStoreCheck::CheckDirInitial(int64_t, IOStream &)
+<<<<<<< HEAD
// Purpose: Do initial check on directory, return container ID if OK, or -1 on error
+=======
+// Purpose: Do initial check on directory, return container ID
+// if OK, or -1 on error
+>>>>>>> 0.12
// Created: 22/4/04
//
// --------------------------------------------------------------------------
@@ -588,7 +793,16 @@ void BackupStoreCheck::CheckDirectories()
// This phase will check all the files in the directories, make
// a note of all directories which are missing, and do initial fixing.
+<<<<<<< HEAD
// Scan all objects
+=======
+ // The root directory is not contained inside another directory, so
+ // it has no directory entry to scan, but we have to count it
+ // somewhere, so we'll count it here.
+ mNumDirectories++;
+
+ // Scan all objects.
+>>>>>>> 0.12
for(Info_t::const_iterator i(mInfo.begin()); i != mInfo.end(); ++i)
{
IDBlock *pblock = i->second;
@@ -609,6 +823,7 @@ void BackupStoreCheck::CheckDirectories()
}
// Flag for modifications
+<<<<<<< HEAD
bool isModified = false;
// Check for validity
@@ -625,10 +840,43 @@ void BackupStoreCheck::CheckDirectories()
// Go through, and check that everything in that directory exists and is valid
std::vector<int64_t> toDelete;
+=======
+ bool isModified = CheckDirectory(dir);
+
+ // Check the directory again, now that entries have been removed
+ if(dir.CheckAndFix())
+ {
+ // Wasn't quite right, and has been modified
+ BOX_ERROR("Directory ID " <<
+ BOX_FORMAT_OBJECTID(pblock->mID[e]) <<
+ " was still bad after all checks");
+ ++mNumberErrorsFound;
+ isModified = true;
+ }
+ else if(isModified)
+ {
+ BOX_INFO("Directory ID " <<
+ BOX_FORMAT_OBJECTID(pblock->mID[e]) <<
+ " was OK after fixing");
+ }
+
+ if(isModified && mFixErrors)
+ {
+ BOX_WARNING("Writing modified directory to disk: " <<
+ BOX_FORMAT_OBJECTID(pblock->mID[e]));
+ RaidFileWrite fixed(mDiscSetNumber, filename);
+ fixed.Open(true /* allow overwriting */);
+ dir.WriteToStream(fixed);
+ fixed.Commit(true /* convert to raid representation now */);
+ }
+
+ // Count valid entries
+>>>>>>> 0.12
BackupStoreDirectory::Iterator i(dir);
BackupStoreDirectory::Entry *en = 0;
while((en = i.Next()) != 0)
{
+<<<<<<< HEAD
// Lookup the item
int32_t iIndex;
IDBlock *piBlock = LookupID(en->GetObjectID(), iIndex);
@@ -773,4 +1021,238 @@ void BackupStoreCheck::CheckDirectories()
}
+=======
+ int32_t iIndex;
+ IDBlock *piBlock = LookupID(en->GetObjectID(), iIndex);
+
+ ASSERT(piBlock != 0 ||
+ mDirsWhichContainLostDirs.find(en->GetObjectID())
+ != mDirsWhichContainLostDirs.end());
+ if (piBlock)
+ {
+ // Normally it would exist and this
+ // check would not be necessary, but
+ // we might have missing directories
+ // that we will recreate later.
+ // cf mDirsWhichContainLostDirs.
+ uint8_t iflags = GetFlags(piBlock, iIndex);
+ SetFlags(piBlock, iIndex, iflags | Flags_IsContained);
+ }
+
+ if(en->IsDir())
+ {
+ mNumDirectories++;
+ }
+ else if(!en->IsFile())
+ {
+ BOX_TRACE("Not counting object " <<
+ BOX_FORMAT_OBJECTID(en->GetObjectID()) <<
+ " with flags " << en->GetFlags());
+ }
+ else // it's a good file, add to sizes
+ if(en->IsOld() && en->IsDeleted())
+ {
+ BOX_WARNING("File " <<
+ BOX_FORMAT_OBJECTID(en->GetObjectID()) <<
+ " is both old and deleted, "
+ "this should not happen!");
+ }
+ else if(en->IsOld())
+ {
+ mNumFiles++;
+ mNumOldFiles++;
+ mBlocksInOldFiles += en->GetSizeInBlocks();
+ }
+ else if(en->IsDeleted())
+ {
+ mNumFiles++;
+ mNumDeletedFiles++;
+ mBlocksInDeletedFiles += en->GetSizeInBlocks();
+ }
+ else
+ {
+ mNumFiles++;
+ mBlocksInCurrentFiles += en->GetSizeInBlocks();
+ }
+ }
+ }
+ }
+ }
+}
+
+bool BackupStoreCheck::CheckDirectory(BackupStoreDirectory& dir)
+{
+ bool restart = true;
+ bool isModified = false;
+
+ while(restart)
+ {
+ std::vector<int64_t> toDelete;
+ restart = false;
+
+ // Check for validity
+ if(dir.CheckAndFix())
+ {
+ // Wasn't quite right, and has been modified
+ BOX_ERROR("Directory ID " <<
+ BOX_FORMAT_OBJECTID(dir.GetObjectID()) <<
+ " had invalid entries" <<
+ (mFixErrors ? ", fixed" : ""));
+ ++mNumberErrorsFound;
+ isModified = true;
+ }
+
+ // Go through, and check that everything in that directory exists and is valid
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next()) != 0)
+ {
+ // Lookup the item
+ int32_t iIndex;
+ IDBlock *piBlock = LookupID(en->GetObjectID(), iIndex);
+ bool badEntry = false;
+ if(piBlock != 0)
+ {
+ badEntry = !CheckDirectoryEntry(*en,
+ dir.GetObjectID(), isModified);
+ }
+ // Item can't be found. Is it a directory?
+ else if(en->IsDir())
+ {
+ // Store the directory for later attention
+ mDirsWhichContainLostDirs[en->GetObjectID()] =
+ dir.GetObjectID();
+ }
+ else
+ {
+ // Just remove the entry
+ badEntry = true;
+ BOX_ERROR("Directory ID " <<
+ BOX_FORMAT_OBJECTID(dir.GetObjectID()) <<
+ " references object " <<
+ BOX_FORMAT_OBJECTID(en->GetObjectID()) <<
+ " which does not exist.");
+ ++mNumberErrorsFound;
+ }
+
+ // Is this entry worth keeping?
+ if(badEntry)
+ {
+ toDelete.push_back(en->GetObjectID());
+ }
+ }
+
+ if(toDelete.size() > 0)
+ {
+ // Delete entries from directory
+ for(std::vector<int64_t>::const_iterator d(toDelete.begin()); d != toDelete.end(); ++d)
+ {
+ BOX_ERROR("Removing directory entry " <<
+ BOX_FORMAT_OBJECTID(*d) << " from "
+ "directory " <<
+ BOX_FORMAT_OBJECTID(dir.GetObjectID()));
+ ++mNumberErrorsFound;
+ dir.DeleteEntry(*d);
+ }
+
+ // Mark as modified
+ restart = true;
+ isModified = true;
+
+ // Errors found
+ }
+ }
+
+ return isModified;
+}
+
+bool BackupStoreCheck::CheckDirectoryEntry(BackupStoreDirectory::Entry& rEntry,
+ int64_t DirectoryID, bool& rIsModified)
+{
+ int32_t IndexInDirBlock;
+ IDBlock *piBlock = LookupID(rEntry.GetObjectID(), IndexInDirBlock);
+ ASSERT(piBlock != 0);
+
+ uint8_t iflags = GetFlags(piBlock, IndexInDirBlock);
+ bool badEntry = false;
+
+ // Is the type the same?
+ if(((iflags & Flags_IsDir) == Flags_IsDir) != rEntry.IsDir())
+ {
+ // Entry is of wrong type
+ BOX_ERROR("Directory ID " <<
+ BOX_FORMAT_OBJECTID(DirectoryID) <<
+ " references object " <<
+ BOX_FORMAT_OBJECTID(rEntry.GetObjectID()) <<
+ " which has a different type than expected.");
+ badEntry = true;
+ ++mNumberErrorsFound;
+ }
+ // Check that the entry is not already contained.
+ else if(iflags & Flags_IsContained)
+ {
+ BOX_ERROR("Directory ID " <<
+ BOX_FORMAT_OBJECTID(DirectoryID) <<
+ " references object " <<
+ BOX_FORMAT_OBJECTID(rEntry.GetObjectID()) <<
+ " which is already contained.");
+ badEntry = true;
+ ++mNumberErrorsFound;
+ }
+ else
+ {
+ // Not already contained by another directory.
+ // Don't set the flag until later, after we finish repairing
+ // the directory and removing all bad entries.
+
+ // Check that the container ID of the object is correct
+ if(piBlock->mContainer[IndexInDirBlock] != DirectoryID)
+ {
+ // Needs fixing...
+ if(iflags & Flags_IsDir)
+ {
+ // Add to will fix later list
+ BOX_ERROR("Directory ID " <<
+ BOX_FORMAT_OBJECTID(rEntry.GetObjectID())
+ << " has wrong container ID.");
+ mDirsWithWrongContainerID.push_back(rEntry.GetObjectID());
+ ++mNumberErrorsFound;
+ }
+ else
+ {
+ // This is OK for files, they might move
+ BOX_NOTICE("File ID " <<
+ BOX_FORMAT_OBJECTID(rEntry.GetObjectID())
+ << " has different container ID, "
+ "probably moved");
+ }
+
+ // Fix entry for now
+ piBlock->mContainer[IndexInDirBlock] = DirectoryID;
+ }
+ }
+
+ // Check the object size, if it's OK and a file
+ if(!badEntry && !rEntry.IsDir())
+ {
+ if(rEntry.GetSizeInBlocks() != piBlock->mObjectSizeInBlocks[IndexInDirBlock])
+ {
+ // Wrong size, correct it.
+ rEntry.SetSizeInBlocks(piBlock->mObjectSizeInBlocks[IndexInDirBlock]);
+
+ // Mark as changed
+ rIsModified = true;
+ ++mNumberErrorsFound;
+
+ // Tell user
+ BOX_ERROR("Directory ID " <<
+ BOX_FORMAT_OBJECTID(DirectoryID) <<
+ " has wrong size for object " <<
+ BOX_FORMAT_OBJECTID(rEntry.GetObjectID()));
+ }
+ }
+
+ return !badEntry;
+}
+>>>>>>> 0.12
diff --git a/lib/backupstore/BackupStoreCheck.h b/lib/backupstore/BackupStoreCheck.h
index 1d5c1b1e..cddd8a4d 100644
--- a/lib/backupstore/BackupStoreCheck.h
+++ b/lib/backupstore/BackupStoreCheck.h
@@ -16,6 +16,11 @@
#include <set>
#include "NamedLock.h"
+<<<<<<< HEAD
+=======
+#include "BackupStoreDirectory.h"
+
+>>>>>>> 0.12
class IOStream;
class BackupStoreFilename;
@@ -26,9 +31,14 @@ The following problems can be fixed:
* Spurious files deleted
* Corrupted files deleted
* Root ID as file, deleted
+<<<<<<< HEAD
* Dirs with wrong object id inside, deleted
* Directory entries pointing to non-existent files, deleted
* Doubly referenced files have second reference deleted
+=======
+ * Dirs with wrong object id in header, deleted
+ * Doubly referenced files have second reference deleted
+>>>>>>> 0.12
* Wrong directory container IDs fixed
* Missing root recreated
* Reattach files which exist, but aren't referenced
@@ -41,7 +51,13 @@ The following problems can be fixed:
* Inside directories,
- only one object per name has old version clear
- IDs aren't duplicated
+<<<<<<< HEAD
* Bad store info files regenerated
+=======
+ - entries pointing to non-existent files are deleted
+ - patches depending on non-existent objects are deleted
+ * Bad store info and refcount files regenerated
+>>>>>>> 0.12
* Bad sizes of files in directories fixed
*/
@@ -82,6 +98,13 @@ public:
void Check();
bool ErrorsFound() {return mNumberErrorsFound > 0;}
+<<<<<<< HEAD
+=======
+ inline int64_t GetNumErrorsFound()
+ {
+ return mNumberErrorsFound;
+ }
+>>>>>>> 0.12
private:
enum
@@ -120,6 +143,12 @@ private:
int64_t CheckObjectsScanDir(int64_t StartID, int Level, const std::string &rDirName);
void CheckObjectsDir(int64_t StartID);
bool CheckAndAddObject(int64_t ObjectID, const std::string &rFilename);
+<<<<<<< HEAD
+=======
+ bool CheckDirectory(BackupStoreDirectory& dir);
+ bool CheckDirectoryEntry(BackupStoreDirectory::Entry& rEntry,
+ int64_t DirectoryID, bool& rIsModified);
+>>>>>>> 0.12
int64_t CheckFile(int64_t ObjectID, IOStream &rStream);
int64_t CheckDirInitial(int64_t ObjectID, IOStream &rStream);
@@ -156,11 +185,19 @@ private:
#else
#define DUMP_OBJECT_INFO
#endif
+<<<<<<< HEAD
+=======
+
+>>>>>>> 0.12
private:
std::string mStoreRoot;
int mDiscSetNumber;
int32_t mAccountID;
+<<<<<<< HEAD
+=======
+ std::string mAccountName;
+>>>>>>> 0.12
bool mFixErrors;
bool mQuiet;
@@ -179,7 +216,12 @@ private:
// List of stuff to fix
std::vector<BackupStoreCheck_ID_t> mDirsWithWrongContainerID;
// This is a map of lost dir ID -> existing dir ID
+<<<<<<< HEAD
std::map<BackupStoreCheck_ID_t, BackupStoreCheck_ID_t> mDirsWhichContainLostDirs;
+=======
+ std::map<BackupStoreCheck_ID_t, BackupStoreCheck_ID_t>
+ mDirsWhichContainLostDirs;
+>>>>>>> 0.12
// Set of extra directories added
std::set<BackupStoreCheck_ID_t> mDirsAdded;
@@ -190,9 +232,20 @@ private:
// Usage
int64_t mBlocksUsed;
+<<<<<<< HEAD
+ int64_t mBlocksInOldFiles;
+ int64_t mBlocksInDeletedFiles;
+ int64_t mBlocksInDirectories;
+=======
+ int64_t mBlocksInCurrentFiles;
int64_t mBlocksInOldFiles;
int64_t mBlocksInDeletedFiles;
int64_t mBlocksInDirectories;
+ int64_t mNumFiles;
+ int64_t mNumOldFiles;
+ int64_t mNumDeletedFiles;
+ int64_t mNumDirectories;
+>>>>>>> 0.12
};
#endif // BACKUPSTORECHECK__H
diff --git a/lib/backupstore/BackupStoreCheck2.cpp b/lib/backupstore/BackupStoreCheck2.cpp
index bcb5c5e9..341ac524 100644
--- a/lib/backupstore/BackupStoreCheck2.cpp
+++ b/lib/backupstore/BackupStoreCheck2.cpp
@@ -12,6 +12,7 @@
#include <stdio.h>
#include <string.h>
+<<<<<<< HEAD
#include "BackupStoreCheck.h"
#include "StoreStructure.h"
#include "RaidFileRead.h"
@@ -23,6 +24,20 @@
#include "BackupStoreDirectory.h"
#include "BackupStoreConstants.h"
#include "BackupStoreInfo.h"
+=======
+#include "autogen_BackupStoreException.h"
+#include "BackupStoreCheck.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreInfo.h"
+#include "BackupStoreObjectMagic.h"
+#include "MemBlockStream.h"
+#include "RaidFileRead.h"
+#include "RaidFileWrite.h"
+#include "StoreStructure.h"
+>>>>>>> 0.12
#include "MemLeakFindOn.h"
@@ -137,7 +152,11 @@ void BackupStoreCheck::CheckUnattachedObjects()
if((flags & Flags_IsContained) == 0)
{
// Unattached object...
+<<<<<<< HEAD
BOX_WARNING("Object " <<
+=======
+ BOX_ERROR("Object " <<
+>>>>>>> 0.12
BOX_FORMAT_OBJECTID(pblock->mID[e]) <<
" is unattached.");
++mNumberErrorsFound;
@@ -157,6 +176,10 @@ void BackupStoreCheck::CheckUnattachedObjects()
int64_t diffFromObjectID = 0;
std::string filename;
StoreStructure::MakeObjectFilename(pblock->mID[e], mStoreRoot, mDiscSetNumber, filename, false /* don't attempt to make sure the dir exists */);
+<<<<<<< HEAD
+=======
+
+>>>>>>> 0.12
// The easiest way to do this is to verify it again. Not such a bad penalty, because
// this really shouldn't be done very often.
{
@@ -382,7 +405,11 @@ void BackupStoreDirectoryFixer::InsertObject(int64_t ObjectID, bool IsDirectory,
}
// Add a new entry in an appropriate place
+<<<<<<< HEAD
mDirectory.AddUnattactedObject(objectStoreFilename, modTime,
+=======
+ mDirectory.AddUnattachedObject(objectStoreFilename, modTime,
+>>>>>>> 0.12
ObjectID, sizeInBlocks,
IsDirectory?(BackupStoreDirectory::Entry::Flags_Dir):(BackupStoreDirectory::Entry::Flags_File));
}
@@ -573,6 +600,7 @@ void BackupStoreCheck::FixDirsWithLostDirs()
void BackupStoreCheck::WriteNewStoreInfo()
{
// Attempt to load the existing store info file
+<<<<<<< HEAD
std::auto_ptr<BackupStoreInfo> poldInfo;
try
{
@@ -584,12 +612,37 @@ void BackupStoreCheck::WriteNewStoreInfo()
++mNumberErrorsFound;
}
+=======
+ std::auto_ptr<BackupStoreInfo> pOldInfo;
+ try
+ {
+ pOldInfo.reset(BackupStoreInfo::Load(mAccountID, mStoreRoot, mDiscSetNumber, true /* read only */).release());
+ mAccountName = pOldInfo->GetAccountName();
+ }
+ catch(...)
+ {
+ BOX_ERROR("Load of existing store info failed, regenerating.");
+ ++mNumberErrorsFound;
+ }
+
+ BOX_NOTICE("Total files: " << mNumFiles << " (of which "
+ "old files: " << mNumOldFiles << ", "
+ "deleted files: " << mNumDeletedFiles << "), "
+ "directories: " << mNumDirectories);
+
+>>>>>>> 0.12
// Minimum soft and hard limits
int64_t minSoft = ((mBlocksUsed * 11) / 10) + 1024;
int64_t minHard = ((minSoft * 11) / 10) + 1024;
// Need to do anything?
+<<<<<<< HEAD
if(poldInfo.get() != 0 && mNumberErrorsFound == 0 && poldInfo->GetAccountID() == mAccountID)
+=======
+ if(pOldInfo.get() != 0 &&
+ mNumberErrorsFound == 0 &&
+ pOldInfo->GetAccountID() == mAccountID)
+>>>>>>> 0.12
{
// Leave the store info as it is, no need to alter it because nothing really changed,
// and the only essential thing was that the account ID was correct, which it was.
@@ -601,6 +654,7 @@ void BackupStoreCheck::WriteNewStoreInfo()
// Work out the new limits
int64_t softLimit = minSoft;
int64_t hardLimit = minHard;
+<<<<<<< HEAD
if(poldInfo.get() != 0 && poldInfo->GetBlocksSoftLimit() > minSoft)
{
softLimit = poldInfo->GetBlocksSoftLimit();
@@ -616,6 +670,25 @@ void BackupStoreCheck::WriteNewStoreInfo()
else
{
BOX_WARNING("Hard limit for account changed to ensure housekeeping doesn't delete files on next run.");
+=======
+ if(pOldInfo.get() != 0 && pOldInfo->GetBlocksSoftLimit() > minSoft)
+ {
+ softLimit = pOldInfo->GetBlocksSoftLimit();
+ }
+ else
+ {
+ BOX_WARNING("Soft limit for account changed to ensure "
+ "housekeeping doesn't delete files on next run.");
+ }
+ if(pOldInfo.get() != 0 && pOldInfo->GetBlocksHardLimit() > minHard)
+ {
+ hardLimit = pOldInfo->GetBlocksHardLimit();
+ }
+ else
+ {
+ BOX_WARNING("Hard limit for account changed to ensure "
+ "housekeeping doesn't delete files on next run.");
+>>>>>>> 0.12
}
// Object ID
@@ -626,17 +699,51 @@ void BackupStoreCheck::WriteNewStoreInfo()
}
// Build a new store info
+<<<<<<< HEAD
+ std::auto_ptr<BackupStoreInfo> info(BackupStoreInfo::CreateForRegeneration(
+ mAccountID,
+=======
+ std::auto_ptr<MemBlockStream> extra_data;
+ if(pOldInfo.get())
+ {
+ extra_data.reset(new MemBlockStream(pOldInfo->GetExtraData()));
+ }
+ else
+ {
+ extra_data.reset(new MemBlockStream(/* empty */));
+ }
std::auto_ptr<BackupStoreInfo> info(BackupStoreInfo::CreateForRegeneration(
mAccountID,
+ mAccountName,
+>>>>>>> 0.12
mStoreRoot,
mDiscSetNumber,
lastObjID,
mBlocksUsed,
+<<<<<<< HEAD
+=======
+ mBlocksInCurrentFiles,
+>>>>>>> 0.12
mBlocksInOldFiles,
mBlocksInDeletedFiles,
mBlocksInDirectories,
softLimit,
+<<<<<<< HEAD
hardLimit));
+=======
+ hardLimit,
+ (pOldInfo.get() ? pOldInfo->IsAccountEnabled() : true),
+ *extra_data));
+ info->AdjustNumFiles(mNumFiles);
+ info->AdjustNumOldFiles(mNumOldFiles);
+ info->AdjustNumDeletedFiles(mNumDeletedFiles);
+ info->AdjustNumDirectories(mNumDirectories);
+
+ if(pOldInfo.get())
+ {
+ mNumberErrorsFound += info->ReportChangesTo(*pOldInfo);
+ }
+>>>>>>> 0.12
// Save to disc?
if(mFixErrors)
@@ -663,7 +770,16 @@ bool BackupStoreDirectory::CheckAndFix()
bool changed = false;
// Check that if a file depends on a new version, that version is in this directory
+<<<<<<< HEAD
+ {
+=======
+ bool restart;
+
+ do
{
+ restart = false;
+
+>>>>>>> 0.12
std::vector<Entry*>::iterator i(mEntries.begin());
for(; i != mEntries.end(); ++i)
{
@@ -674,7 +790,11 @@ bool BackupStoreDirectory::CheckAndFix()
if(newerEn == 0)
{
// Depends on something, but it isn't there.
+<<<<<<< HEAD
BOX_TRACE("Entry id " << FMT_i <<
+=======
+ BOX_WARNING("Entry id " << FMT_i <<
+>>>>>>> 0.12
" removed because depends "
"on newer version " <<
FMT_OID(dependsNewer) <<
@@ -684,11 +804,20 @@ bool BackupStoreDirectory::CheckAndFix()
delete *i;
mEntries.erase(i);
+<<<<<<< HEAD
// Start again at the beginning of the vector, the iterator is now invalid
i = mEntries.begin();
// Mark as changed
changed = true;
+=======
+ // Mark as changed
+ changed = true;
+
+ // Start again at the beginning of the vector, the iterator is now invalid
+ restart = true;
+ break;
+>>>>>>> 0.12
}
else
{
@@ -710,6 +839,10 @@ bool BackupStoreDirectory::CheckAndFix()
}
}
}
+<<<<<<< HEAD
+=======
+ while(restart);
+>>>>>>> 0.12
// Check that if a file has a dependency marked, it exists, and remove it if it doesn't
{
@@ -834,7 +967,11 @@ bool BackupStoreDirectory::CheckAndFix()
// erase the thing from the list
Entry *pentry = (*i);
mEntries.erase(i);
+<<<<<<< HEAD
+=======
+
+>>>>>>> 0.12
// And delete the entry object
delete pentry;
@@ -852,12 +989,20 @@ bool BackupStoreDirectory::CheckAndFix()
// --------------------------------------------------------------------------
//
// Function
+<<<<<<< HEAD
// Name: BackupStoreDirectory::AddUnattactedObject(...)
+=======
+// Name: BackupStoreDirectory::AddUnattachedObject(...)
+>>>>>>> 0.12
// Purpose: Adds an object which is currently unattached. Assume that CheckAndFix() will be called afterwards.
// Created: 22/4/04
//
// --------------------------------------------------------------------------
+<<<<<<< HEAD
void BackupStoreDirectory::AddUnattactedObject(const BackupStoreFilename &rName,
+=======
+void BackupStoreDirectory::AddUnattachedObject(const BackupStoreFilename &rName,
+>>>>>>> 0.12
box_time_t ModificationTime, int64_t ObjectID, int64_t SizeInBlocks, int16_t Flags)
{
Entry *pnew = new Entry(rName, ModificationTime, ObjectID, SizeInBlocks, Flags,
diff --git a/lib/backupstore/BackupStoreCheckData.cpp b/lib/backupstore/BackupStoreCheckData.cpp
index fed0c3f1..c89b5082 100644
--- a/lib/backupstore/BackupStoreCheckData.cpp
+++ b/lib/backupstore/BackupStoreCheckData.cpp
@@ -65,16 +65,23 @@ void BackupStoreCheck::AddID(BackupStoreCheck_ID_t ID,
if(mpInfoLastBlock == 0 || mInfoLastBlockEntries >= BACKUPSTORECHECK_BLOCK_SIZE)
{
// No. Allocate a new one
+<<<<<<< HEAD
IDBlock *pblk = (IDBlock*)::malloc(sizeof(IDBlock));
+=======
+ IDBlock *pblk = (IDBlock*)calloc(1, sizeof(IDBlock));
+>>>>>>> 0.12
if(pblk == 0)
{
throw std::bad_alloc();
}
+<<<<<<< HEAD
// Zero all the flags entries
for(int z = 0; z < (BACKUPSTORECHECK_BLOCK_SIZE * Flags__NumFlags / Flags__NumItemsPerEntry); ++z)
{
pblk->mFlags[z] = 0;
}
+=======
+>>>>>>> 0.12
// Store in map
mInfo[ID] = pblk;
// Allocated and stored OK, setup for use
@@ -141,8 +148,13 @@ BackupStoreCheck::IDBlock *BackupStoreCheck::LookupID(BackupStoreCheck_ID_t ID,
pblock = ib->second;
}
+<<<<<<< HEAD
ASSERT(pblock != 0);
if(pblock == 0) return 0;
+=======
+ if(pblock == 0) return 0;
+ ASSERT(pblock != 0);
+>>>>>>> 0.12
// How many entries are there in the block
int32_t bentries = (pblock == mpInfoLastBlock)?mInfoLastBlockEntries:BACKUPSTORECHECK_BLOCK_SIZE;
diff --git a/lib/backupstore/BackupStoreConfigVerify.cpp b/lib/backupstore/BackupStoreConfigVerify.cpp
index cc6efcf5..c2344634 100644
--- a/lib/backupstore/BackupStoreConfigVerify.cpp
+++ b/lib/backupstore/BackupStoreConfigVerify.cpp
@@ -38,6 +38,7 @@ static const ConfigurationVerifyKey verifyrootkeys[] =
ConfigTest_Exists | ConfigTest_IsInt),
ConfigurationVerifyKey("ExtendedLogging", ConfigTest_IsBool, false),
// make value "yes" to enable in config file
+<<<<<<< HEAD
#ifdef WIN32
ConfigurationVerifyKey("RaidFileConf", ConfigTest_LastEntry)
@@ -45,6 +46,9 @@ static const ConfigurationVerifyKey verifyrootkeys[] =
ConfigurationVerifyKey("RaidFileConf", ConfigTest_LastEntry,
BOX_FILE_RAIDFILE_DEFAULT_CONFIG)
#endif
+=======
+ ConfigurationVerifyKey("RaidFileConf", ConfigTest_LastEntry)
+>>>>>>> 0.12
};
const ConfigurationVerify BackupConfigFileVerify =
diff --git a/lib/backupstore/BackupStoreConstants.h b/lib/backupstore/BackupStoreConstants.h
new file mode 100644
index 00000000..2c33fd8f
--- /dev/null
+++ b/lib/backupstore/BackupStoreConstants.h
@@ -0,0 +1,44 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreConstants.h
+// Purpose: Constants for the backup system
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTORECONSTANTS__H
+#define BACKUPSTORECONSTANTS__H
+
+#define BACKUPSTORE_ROOT_DIRECTORY_ID 1
+
+#define BACKUP_STORE_SERVER_VERSION 1
+
+// Minimum size for a chunk to be compressed
+#define BACKUP_FILE_MIN_COMPRESSED_CHUNK_SIZE 256
+
+// min and max sizes for blocks
+#define BACKUP_FILE_MIN_BLOCK_SIZE 4096
+#define BACKUP_FILE_MAX_BLOCK_SIZE (512*1024)
+
+// Increase the block size if there are more than this number of blocks
+#define BACKUP_FILE_INCREASE_BLOCK_SIZE_AFTER 4096
+
+// Avoid creating blocks smaller than this
+#define BACKUP_FILE_AVOID_BLOCKS_LESS_THAN 128
+
+// Maximum number of sizes to do an rsync-like scan for
+#define BACKUP_FILE_DIFF_MAX_BLOCK_SIZES 64
+
+// When doing rsync scans, do not scan for blocks smaller than
+#define BACKUP_FILE_DIFF_MIN_BLOCK_SIZE 128
+
+// A limit to stop diffing running out of control: If more than this
+// times the number of blocks in the original index are found, stop
+// looking. This stops really bad cases of diffing files containing
+// all the same byte using huge amounts of memory and processor time.
+// This is a multiple of the number of blocks in the diff from file.
+#define BACKUP_FILE_DIFF_MAX_BLOCK_FIND_MULTIPLE 4096
+
+#endif // BACKUPSTORECONSTANTS__H
+
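The constants above bound the block sizes used when encoding files; the encoder (see BackupStoreFileEncodeStream.cpp in this tree) applies them. The following standalone sketch only illustrates how the minimum, maximum and growth-threshold constants interact, assuming this header is included; it is not the library's actual selection algorithm.

#include <stdint.h>
#include "BackupStoreConstants.h"

// Illustrative only: pick a block size within the bounds defined above,
// doubling it while the file would otherwise need more than
// BACKUP_FILE_INCREASE_BLOCK_SIZE_AFTER blocks.
static int32_t ChooseBlockSize(int64_t FileSize)
{
	int32_t blockSize = BACKUP_FILE_MIN_BLOCK_SIZE;
	while(blockSize < BACKUP_FILE_MAX_BLOCK_SIZE
		&& (FileSize / blockSize) > BACKUP_FILE_INCREASE_BLOCK_SIZE_AFTER)
	{
		blockSize *= 2;
	}
	return blockSize;
}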
diff --git a/lib/backupstore/BackupStoreContext.cpp b/lib/backupstore/BackupStoreContext.cpp
new file mode 100644
index 00000000..2c98b1d7
--- /dev/null
+++ b/lib/backupstore/BackupStoreContext.cpp
@@ -0,0 +1,1808 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreContext.cpp
+// Purpose: Context for backup store server
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <stdio.h>
+
+#include "BackupConstants.h"
+#include "BackupStoreContext.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreException.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreInfo.h"
+#include "BackupStoreObjectMagic.h"
+#include "BufferedStream.h"
+#include "BufferedWriteStream.h"
+#include "FileStream.h"
+#include "InvisibleTempFileStream.h"
+#include "RaidFileController.h"
+#include "RaidFileRead.h"
+#include "RaidFileWrite.h"
+#include "StoreStructure.h"
+
+#include "MemLeakFindOn.h"
+
+
+// Maximum number of directories to keep in the cache
+// When the cache is bigger than this, everything gets
+// deleted.
+#ifdef BOX_RELEASE_BUILD
+ #define MAX_CACHE_SIZE 32
+#else
+ #define MAX_CACHE_SIZE 2
+#endif
+
+// Allow the housekeeping process 4 seconds to release an account
+#define MAX_WAIT_FOR_HOUSEKEEPING_TO_RELEASE_ACCOUNT 4
+
+// Maximum amount of store info updates before it's actually saved to disc.
+#define STORE_INFO_SAVE_DELAY 96
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::BackupStoreContext()
+// Purpose: Constructor
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+BackupStoreContext::BackupStoreContext(int32_t ClientID,
+ HousekeepingInterface &rDaemon, const std::string& rConnectionDetails)
+ : mConnectionDetails(rConnectionDetails),
+ mClientID(ClientID),
+ mrDaemon(rDaemon),
+ mProtocolPhase(Phase_START),
+ mClientHasAccount(false),
+ mStoreDiscSet(-1),
+ mReadOnly(true),
+ mSaveStoreInfoDelay(STORE_INFO_SAVE_DELAY),
+ mpTestHook(NULL)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::~BackupStoreContext()
+// Purpose: Destructor
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+BackupStoreContext::~BackupStoreContext()
+{
+ // Delete the objects in the cache
+ for(std::map<int64_t, BackupStoreDirectory*>::iterator i(mDirectoryCache.begin()); i != mDirectoryCache.end(); ++i)
+ {
+ delete (i->second);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::CleanUp()
+// Purpose: Clean up after a connection
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::CleanUp()
+{
+ // Make sure the store info is saved, if it has been loaded, isn't read only and has been modified
+ if(mapStoreInfo.get() && !(mapStoreInfo->IsReadOnly()) &&
+ mapStoreInfo->IsModified())
+ {
+ mapStoreInfo->Save();
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::ReceivedFinishCommand()
+// Purpose: Called when the finish command is received by the protocol
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::ReceivedFinishCommand()
+{
+ if(!mReadOnly && mapStoreInfo.get())
+ {
+ // Save the store info, not delayed
+ SaveStoreInfo(false);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::AttemptToGetWriteLock()
+// Purpose: Attempt to get a write lock for the store, and if so, unset the read only flags
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+bool BackupStoreContext::AttemptToGetWriteLock()
+{
+ // Make the filename of the write lock file
+ std::string writeLockFile;
+ StoreStructure::MakeWriteLockFilename(mStoreRoot, mStoreDiscSet, writeLockFile);
+
+ // Request the lock
+ bool gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
+
+ if(!gotLock)
+ {
+		// The housekeeping process might have the account open -- ask it to release it
+ char msg[256];
+ int msgLen = sprintf(msg, "r%x\n", mClientID);
+ // Send message
+ mrDaemon.SendMessageToHousekeepingProcess(msg, msgLen);
+
+ // Then try again a few times
+ int tries = MAX_WAIT_FOR_HOUSEKEEPING_TO_RELEASE_ACCOUNT;
+ do
+ {
+ ::sleep(1 /* second */);
+ --tries;
+ gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
+
+ } while(!gotLock && tries > 0);
+ }
+
+ if(gotLock)
+ {
+ // Got the lock, mark as not read only
+ mReadOnly = false;
+ }
+
+ return gotLock;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::LoadStoreInfo()
+// Purpose: Load the store info from disc
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::LoadStoreInfo()
+{
+ if(mapStoreInfo.get() != 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoAlreadyLoaded)
+ }
+
+ // Load it up!
+ std::auto_ptr<BackupStoreInfo> i(BackupStoreInfo::Load(mClientID, mStoreRoot, mStoreDiscSet, mReadOnly));
+
+ // Check it
+ if(i->GetAccountID() != mClientID)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoForWrongAccount)
+ }
+
+ // Keep the pointer to it
+ mapStoreInfo = i;
+
+ BackupStoreAccountDatabase::Entry account(mClientID, mStoreDiscSet);
+
+ // try to load the reference count database
+ try
+ {
+ mapRefCount = BackupStoreRefCountDatabase::Load(account, false);
+ }
+ catch(BoxException &e)
+ {
+ BOX_WARNING("Reference count database is missing or corrupted, "
+ "creating a new one, expect housekeeping to find and "
+ "fix problems with reference counts later.");
+
+ BackupStoreRefCountDatabase::CreateForRegeneration(account);
+ mapRefCount = BackupStoreRefCountDatabase::Load(account, false);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::SaveStoreInfo(bool)
+// Purpose: Potentially delayed saving of the store info
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::SaveStoreInfo(bool AllowDelay)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Can delay saving it a little while?
+ if(AllowDelay)
+ {
+ --mSaveStoreInfoDelay;
+ if(mSaveStoreInfoDelay > 0)
+ {
+ return;
+ }
+ }
+
+ // Want to save now
+ mapStoreInfo->Save();
+
+ // Set count for next delay
+ mSaveStoreInfoDelay = STORE_INFO_SAVE_DELAY;
+}
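SaveStoreInfo() batches disc writes: up to STORE_INFO_SAVE_DELAY delayed requests may accumulate before the info is actually written. Below is a self-contained sketch of the same countdown pattern; DelayedSaver and its members are illustrative names, not part of the backup store API.

// Countdown-based write batching, mirroring SaveStoreInfo() above.
class DelayedSaver
{
public:
	typedef void (*SaveFunction)();

	DelayedSaver(int Delay, SaveFunction pSave)
		: mDelay(Delay), mCounter(Delay), mpSave(pSave)
	{
	}

	void RequestSave(bool AllowDelay)
	{
		if(AllowDelay && --mCounter > 0)
		{
			return;			// postpone the write
		}
		mpSave();			// write now
		mCounter = mDelay;		// start the next countdown
	}

private:
	int mDelay;
	int mCounter;
	SaveFunction mpSave;
};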
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::MakeObjectFilename(int64_t, std::string &, bool)
+// Purpose: Create the filename of an object in the store, optionally creating the
+// containing directory if it doesn't already exist.
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::MakeObjectFilename(int64_t ObjectID, std::string &rOutput, bool EnsureDirectoryExists)
+{
+ // Delegate to utility function
+ StoreStructure::MakeObjectFilename(ObjectID, mStoreRoot, mStoreDiscSet, rOutput, EnsureDirectoryExists);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::GetDirectoryInternal(int64_t)
+// Purpose: Return a reference to a directory. Valid only until the
+// next time a function which affects directories is called.
+// Mainly this function, and creation of files.
+// Private version of this, which returns non-const directories.
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory &BackupStoreContext::GetDirectoryInternal(int64_t ObjectID)
+{
+ // Get the filename
+ std::string filename;
+ MakeObjectFilename(ObjectID, filename);
+
+ // Already in cache?
+ std::map<int64_t, BackupStoreDirectory*>::iterator item(mDirectoryCache.find(ObjectID));
+ if(item != mDirectoryCache.end())
+ {
+ // Check the revision ID of the file -- does it need refreshing?
+ int64_t revID = 0;
+ if(!RaidFileRead::FileExists(mStoreDiscSet, filename, &revID))
+ {
+ THROW_EXCEPTION(BackupStoreException, DirectoryHasBeenDeleted)
+ }
+
+ if(revID == item->second->GetRevisionID())
+ {
+ // Looks good... return the cached object
+ BOX_TRACE("Returning object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ " from cache, modtime = " << revID);
+ return *(item->second);
+ }
+
+ BOX_TRACE("Refreshing object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ " in cache, modtime changed from " <<
+ item->second->GetRevisionID() << " to " << revID);
+
+ // Delete this cached object
+ delete item->second;
+ mDirectoryCache.erase(item);
+ }
+
+ // Need to load it up
+
+ // First check to see if the cache is too big
+ if(mDirectoryCache.size() > MAX_CACHE_SIZE)
+ {
+ // Very simple. Just delete everything!
+ for(std::map<int64_t, BackupStoreDirectory*>::iterator i(mDirectoryCache.begin()); i != mDirectoryCache.end(); ++i)
+ {
+ delete (i->second);
+ }
+ mDirectoryCache.clear();
+ }
+
+ // Get a RaidFileRead to read it
+ int64_t revID = 0;
+ std::auto_ptr<RaidFileRead> objectFile(RaidFileRead::Open(mStoreDiscSet, filename, &revID));
+ ASSERT(revID != 0);
+
+ // New directory object
+ std::auto_ptr<BackupStoreDirectory> dir(new BackupStoreDirectory);
+
+	// Read it from the stream, then set its revision ID
+ BufferedStream buf(*objectFile);
+ dir->ReadFromStream(buf, IOStream::TimeOutInfinite);
+ dir->SetRevisionID(revID);
+
+ // Make sure the size of the directory is available for writing the dir back
+ int64_t dirSize = objectFile->GetDiscUsageInBlocks();
+ ASSERT(dirSize > 0);
+ dir->SetUserInfo1_SizeInBlocks(dirSize);
+
+ // Store in cache
+ BackupStoreDirectory *pdir = dir.release();
+ try
+ {
+ mDirectoryCache[ObjectID] = pdir;
+ }
+ catch(...)
+ {
+ delete pdir;
+ throw;
+ }
+
+ // Return it
+ return *pdir;
+}
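GetDirectoryInternal() serves directories from a cache keyed by object ID and reloads an entry whenever its on-disc revision ID has changed. A condensed, self-contained sketch of that revision-checked lookup follows; the two private helpers are placeholders for the RaidFileRead calls used above, stubbed out so the sketch compiles on its own.

#include <stdint.h>
#include <map>

struct CachedDir
{
	int64_t revision;
	// ... directory contents would live here ...
};

class DirCache
{
public:
	CachedDir &Get(int64_t ObjectID)
	{
		std::map<int64_t, CachedDir>::iterator it = mCache.find(ObjectID);
		if(it != mCache.end())
		{
			// Still current on disc? Serve it from the cache.
			if(RevisionOnDisc(ObjectID) == it->second.revision)
			{
				return it->second;
			}
			// Stale -- drop it and reload below.
			mCache.erase(it);
		}
		CachedDir loaded = LoadFromDisc(ObjectID);
		return mCache[ObjectID] = loaded;
	}

private:
	// Placeholders: the real code uses RaidFileRead::FileExists() and
	// RaidFileRead::Open() to obtain these.
	int64_t RevisionOnDisc(int64_t ObjectID) { return 0; }
	CachedDir LoadFromDisc(int64_t ObjectID) { CachedDir d; d.revision = 0; return d; }

	std::map<int64_t, CachedDir> mCache;
};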
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::AllocateObjectID()
+// Purpose: Allocate a new object ID, tolerant of failures to save store info
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+int64_t BackupStoreContext::AllocateObjectID()
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ // Given that the store info may not be saved for STORE_INFO_SAVE_DELAY
+ // times after it has been updated, this is a reasonable number of times
+ // to try for finding an unused ID.
+ // (Sizes used in the store info are fixed by the housekeeping process)
+ int retryLimit = (STORE_INFO_SAVE_DELAY * 2);
+
+ while(retryLimit > 0)
+ {
+ // Attempt to allocate an ID from the store
+ int64_t id = mapStoreInfo->AllocateObjectID();
+
+ // Generate filename
+ std::string filename;
+ MakeObjectFilename(id, filename);
+ // Check it doesn't exist
+ if(!RaidFileRead::FileExists(mStoreDiscSet, filename))
+ {
+ // Success!
+ return id;
+ }
+
+ // Decrement retry count, and try again
+ --retryLimit;
+
+ // Mark that the store info should be saved as soon as possible
+ mSaveStoreInfoDelay = 0;
+
+ BOX_WARNING("When allocating object ID, found that " <<
+ BOX_FORMAT_OBJECTID(id) << " is already in use");
+ }
+
+ THROW_EXCEPTION(BackupStoreException, CouldNotFindUnusedIDDuringAllocation)
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::AddFile(IOStream &, int64_t,
+// int64_t, int64_t, const BackupStoreFilename &, bool)
+// Purpose: Add a file to the store, from a given stream, into
+// a specified directory. Returns object ID of the new
+// file.
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
+ int64_t ModificationTime, int64_t AttributesHash,
+ int64_t DiffFromFileID, const BackupStoreFilename &rFilename,
+ bool MarkFileWithSameNameAsOldVersions)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // This is going to be a bit complex to make sure it copes OK
+ // with things going wrong.
+	// The only things which aren't safe are incrementing the object ID
+	// and keeping the blocks-used count entirely accurate -- but neither
+	// is a big problem if it goes wrong. The sizes will be corrected the
+	// next time the account has a housekeeping run, and the object ID
+	// allocation code is tolerant of missed IDs. (The store info is
+	// written lazily, so these tolerances are necessary.)
+
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Allocate the next ID
+ int64_t id = AllocateObjectID();
+
+ // Stream the file to disc
+ std::string fn;
+ MakeObjectFilename(id, fn, true /* make sure the directory it's in exists */);
+ int64_t newObjectBlocksUsed = 0;
+ RaidFileWrite *ppreviousVerStoreFile = 0;
+ bool reversedDiffIsCompletelyDifferent = false;
+ int64_t oldVersionNewBlocksUsed = 0;
+ try
+ {
+ RaidFileWrite storeFile(mStoreDiscSet, fn);
+ storeFile.Open(false /* no overwriting */);
+
+ // size adjustment from use of patch in old file
+ int64_t spaceSavedByConversionToPatch = 0;
+
+ // Diff or full file?
+ if(DiffFromFileID == 0)
+ {
+ // A full file, just store to disc
+ if(!rFile.CopyStreamTo(storeFile, BACKUP_STORE_TIMEOUT))
+ {
+ THROW_EXCEPTION(BackupStoreException, ReadFileFromStreamTimedOut)
+ }
+ }
+ else
+ {
+ // Check that the diffed from ID actually exists in the directory
+ if(dir.FindEntryByID(DiffFromFileID) == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, DiffFromIDNotFoundInDirectory)
+ }
+
+ // Diff file, needs to be recreated.
+ // Choose a temporary filename.
+ std::string tempFn(RaidFileController::DiscSetPathToFileSystemPath(mStoreDiscSet, fn + ".difftemp",
+ 1 /* NOT the same disc as the write file, to avoid using lots of space on the same disc unnecessarily */));
+
+ try
+ {
+ // Open it twice
+#ifdef WIN32
+ InvisibleTempFileStream diff(tempFn.c_str(),
+ O_RDWR | O_CREAT | O_BINARY);
+ InvisibleTempFileStream diff2(tempFn.c_str(),
+ O_RDWR | O_BINARY);
+#else
+ FileStream diff(tempFn.c_str(), O_RDWR | O_CREAT | O_EXCL);
+ FileStream diff2(tempFn.c_str(), O_RDONLY);
+
+ // Unlink it immediately, so it definitely goes away
+ if(::unlink(tempFn.c_str()) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+#endif
+
+ // Stream the incoming diff to this temporary file
+ if(!rFile.CopyStreamTo(diff, BACKUP_STORE_TIMEOUT))
+ {
+ THROW_EXCEPTION(BackupStoreException, ReadFileFromStreamTimedOut)
+ }
+
+ // Verify the diff
+ diff.Seek(0, IOStream::SeekType_Absolute);
+ if(!BackupStoreFile::VerifyEncodedFileFormat(diff))
+ {
+ THROW_EXCEPTION(BackupStoreException, AddedFileDoesNotVerify)
+ }
+
+ // Seek to beginning of diff file
+ diff.Seek(0, IOStream::SeekType_Absolute);
+
+ // Filename of the old version
+ std::string oldVersionFilename;
+ MakeObjectFilename(DiffFromFileID, oldVersionFilename, false /* no need to make sure the directory it's in exists */);
+
+ // Reassemble that diff -- open previous file, and combine the patch and file
+ std::auto_ptr<RaidFileRead> from(RaidFileRead::Open(mStoreDiscSet, oldVersionFilename));
+ BackupStoreFile::CombineFile(diff, diff2, *from, storeFile);
+
+ // Then... reverse the patch back (open the from file again, and create a write file to overwrite it)
+ std::auto_ptr<RaidFileRead> from2(RaidFileRead::Open(mStoreDiscSet, oldVersionFilename));
+ ppreviousVerStoreFile = new RaidFileWrite(mStoreDiscSet, oldVersionFilename);
+ ppreviousVerStoreFile->Open(true /* allow overwriting */);
+ from->Seek(0, IOStream::SeekType_Absolute);
+ diff.Seek(0, IOStream::SeekType_Absolute);
+ BackupStoreFile::ReverseDiffFile(diff, *from, *from2, *ppreviousVerStoreFile,
+ DiffFromFileID, &reversedDiffIsCompletelyDifferent);
+
+ // Store disc space used
+ oldVersionNewBlocksUsed = ppreviousVerStoreFile->GetDiscUsageInBlocks();
+
+ // And make a space adjustment for the size calculation
+ spaceSavedByConversionToPatch =
+ from->GetDiscUsageInBlocks() -
+ oldVersionNewBlocksUsed;
+
+ // Everything cleans up here...
+ }
+ catch(...)
+ {
+ // Be very paranoid about deleting this temp file -- we could only leave a zero byte file anyway
+ ::unlink(tempFn.c_str());
+ throw;
+ }
+ }
+
+ // Get the blocks used
+ newObjectBlocksUsed = storeFile.GetDiscUsageInBlocks();
+
+ // Exceeds the hard limit?
+ int64_t newBlocksUsed = mapStoreInfo->GetBlocksUsed() +
+ newObjectBlocksUsed - spaceSavedByConversionToPatch;
+ if(newBlocksUsed > mapStoreInfo->GetBlocksHardLimit())
+ {
+ THROW_EXCEPTION(BackupStoreException, AddedFileExceedsStorageLimit)
+ // The store file will be deleted automatically by the RaidFile object
+ }
+
+ // Commit the file
+ storeFile.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+ }
+ catch(...)
+ {
+ // Delete any previous version store file
+ if(ppreviousVerStoreFile != 0)
+ {
+ delete ppreviousVerStoreFile;
+ ppreviousVerStoreFile = 0;
+ }
+
+ throw;
+ }
+
+ // Verify the file -- only necessary for non-diffed versions
+ // NOTE: No need to catch exceptions and delete ppreviousVerStoreFile, because
+ // in the non-diffed code path it's never allocated.
+ if(DiffFromFileID == 0)
+ {
+ std::auto_ptr<RaidFileRead> checkFile(RaidFileRead::Open(mStoreDiscSet, fn));
+ if(!BackupStoreFile::VerifyEncodedFileFormat(*checkFile))
+ {
+ // Error! Delete the file
+ RaidFileWrite del(mStoreDiscSet, fn);
+ del.Delete();
+
+ // Exception
+ THROW_EXCEPTION(BackupStoreException, AddedFileDoesNotVerify)
+ }
+ }
+
+ // Modify the directory -- first make all files with the same name
+ // marked as an old version
+ int64_t blocksInOldFiles = 0;
+ try
+ {
+ if(MarkFileWithSameNameAsOldVersions)
+ {
+ BackupStoreDirectory::Iterator i(dir);
+
+ BackupStoreDirectory::Entry *e = 0;
+ while((e = i.Next()) != 0)
+ {
+ // First, check it's not an old version (cheaper comparison)
+ if(! e->IsOld())
+ {
+ // Compare name
+ if(e->GetName() == rFilename)
+ {
+					// Check that it's definitely not an old version
+ ASSERT((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0);
+ // Set old version flag
+ e->AddFlags(BackupStoreDirectory::Entry::Flags_OldVersion);
+ // Can safely do this, because we know we won't be here if it's already
+ // an old version
+ blocksInOldFiles += e->GetSizeInBlocks();
+ }
+ }
+ }
+ }
+
+ // Then the new entry
+ BackupStoreDirectory::Entry *pnewEntry = dir.AddEntry(rFilename,
+ ModificationTime, id, newObjectBlocksUsed,
+ BackupStoreDirectory::Entry::Flags_File,
+ AttributesHash);
+
+ // Adjust for the patch back stuff?
+ if(DiffFromFileID != 0)
+ {
+ // Get old version entry
+ BackupStoreDirectory::Entry *poldEntry = dir.FindEntryByID(DiffFromFileID);
+ ASSERT(poldEntry != 0);
+
+ // Adjust dependency info of file?
+ if(!reversedDiffIsCompletelyDifferent)
+ {
+ poldEntry->SetDependsNewer(id);
+ pnewEntry->SetDependsOlder(DiffFromFileID);
+ }
+
+ // Adjust size of old entry
+ int64_t oldSize = poldEntry->GetSizeInBlocks();
+ poldEntry->SetSizeInBlocks(oldVersionNewBlocksUsed);
+
+ // And adjust blocks used count, for later adjustment
+ newObjectBlocksUsed += (oldVersionNewBlocksUsed - oldSize);
+ blocksInOldFiles += (oldVersionNewBlocksUsed - oldSize);
+ }
+
+ // Write the directory back to disc
+ SaveDirectory(dir, InDirectory);
+
+ // Commit the old version's new patched version, now that the directory safely reflects
+ // the state of the files on disc.
+ if(ppreviousVerStoreFile != 0)
+ {
+ ppreviousVerStoreFile->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+ delete ppreviousVerStoreFile;
+ ppreviousVerStoreFile = 0;
+ }
+ }
+ catch(...)
+ {
+ // Back out on adding that file
+ RaidFileWrite del(mStoreDiscSet, fn);
+ del.Delete();
+
+ // Remove this entry from the cache
+ RemoveDirectoryFromCache(InDirectory);
+
+ // Delete any previous version store file
+ if(ppreviousVerStoreFile != 0)
+ {
+ delete ppreviousVerStoreFile;
+ ppreviousVerStoreFile = 0;
+ }
+
+ // Don't worry about the incremented number in the store info
+ throw;
+ }
+
+ // Check logic
+ ASSERT(ppreviousVerStoreFile == 0);
+
+ // Modify the store info
+
+ if(DiffFromFileID == 0)
+ {
+ mapStoreInfo->AdjustNumFiles(1);
+ }
+ else
+ {
+ mapStoreInfo->AdjustNumOldFiles(1);
+ }
+
+ mapStoreInfo->ChangeBlocksUsed(newObjectBlocksUsed);
+ mapStoreInfo->ChangeBlocksInCurrentFiles(newObjectBlocksUsed -
+ blocksInOldFiles);
+ mapStoreInfo->ChangeBlocksInOldFiles(blocksInOldFiles);
+
+	// Increment the reference count on the new file to one
+ mapRefCount->AddReference(id);
+
+	// Save the store info -- we can cope if this throws, because the information
+	// will be rebuilt by housekeeping, and ID allocation can recover.
+ SaveStoreInfo(false);
+
+ // Return the ID to the caller
+ return id;
+}
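The space accounting in AddFile() is easier to follow with concrete, purely illustrative numbers. Suppose the old version occupies 100 blocks stored as a full file, the uploaded diff reassembles into a new full file of 110 blocks, and rewriting the old version as a reverse patch takes 10 blocks. Then spaceSavedByConversionToPatch is 100 - 10 = 90, the hard-limit check adds only 110 - 90 = 20 blocks to the current usage, and once the directory is updated the old entry's recorded size drops from 100 to 10 blocks, so the store grows by a net 20 blocks.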
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::DeleteFile(const BackupStoreFilename &, int64_t, int64_t &)
+// Purpose: Deletes a file, returning true if the file existed. Object ID returned too, set to zero if not found.
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+bool BackupStoreContext::DeleteFile(const BackupStoreFilename &rFilename, int64_t InDirectory, int64_t &rObjectIDOut)
+{
+ // Essential checks!
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Find the directory the file is in (will exception if it fails)
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Setup flags
+ bool fileExisted = false;
+ bool madeChanges = false;
+ rObjectIDOut = 0; // not found
+
+ // Count of deleted blocks
+ int64_t blocksDel = 0;
+
+ try
+ {
+ // Iterate through directory, only looking at files which haven't been deleted
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *e = 0;
+ while((e = i.Next(BackupStoreDirectory::Entry::Flags_File,
+ BackupStoreDirectory::Entry::Flags_Deleted)) != 0)
+ {
+ // Compare name
+ if(e->GetName() == rFilename)
+ {
+				// Check that it's definitely not already deleted
+ ASSERT((e->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) == 0);
+ // Set deleted flag
+ e->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ // Mark as made a change
+ madeChanges = true;
+				// Can safely do this, because we know we won't be here if it's
+				// already deleted
+ blocksDel += e->GetSizeInBlocks();
+ // Is this the last version?
+ if((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0)
+ {
+ // Yes. It's been found.
+ rObjectIDOut = e->GetObjectID();
+ fileExisted = true;
+ }
+ }
+ }
+
+ // Save changes?
+ if(madeChanges)
+ {
+ // Save the directory back
+ SaveDirectory(dir, InDirectory);
+
+ // Modify the store info, and write
+ // It definitely wasn't an old or deleted version
+ mapStoreInfo->AdjustNumFiles(-1);
+ mapStoreInfo->AdjustNumDeletedFiles(1);
+ mapStoreInfo->ChangeBlocksInDeletedFiles(blocksDel);
+
+ SaveStoreInfo(false);
+ }
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(InDirectory);
+ throw;
+ }
+
+ return fileExisted;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::UndeleteFile(int64_t, int64_t)
+// Purpose: Undeletes a file, if it exists, returning true if
+// the file existed.
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+bool BackupStoreContext::UndeleteFile(int64_t ObjectID, int64_t InDirectory)
+{
+ // Essential checks!
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Find the directory the file is in (will exception if it fails)
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Setup flags
+ bool fileExisted = false;
+ bool madeChanges = false;
+
+ // Count of deleted blocks
+ int64_t blocksDel = 0;
+
+ try
+ {
+ // Iterate through directory, only looking at files which have been deleted
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *e = 0;
+ while((e = i.Next(BackupStoreDirectory::Entry::Flags_File |
+ BackupStoreDirectory::Entry::Flags_Deleted, 0)) != 0)
+ {
+			// Compare object ID
+ if(e->GetObjectID() == ObjectID)
+ {
+ // Check that it's definitely already deleted
+ ASSERT((e->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) != 0);
+ // Clear deleted flag
+ e->RemoveFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ // Mark as made a change
+ madeChanges = true;
+ blocksDel -= e->GetSizeInBlocks();
+
+ // Is this the last version?
+ if((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0)
+ {
+ // Yes. It's been found.
+ fileExisted = true;
+ }
+ }
+ }
+
+ // Save changes?
+ if(madeChanges)
+ {
+ // Save the directory back
+ SaveDirectory(dir, InDirectory);
+
+ // Modify the store info, and write
+ mapStoreInfo->ChangeBlocksInDeletedFiles(blocksDel);
+
+ // Maybe postponed save of store info
+ SaveStoreInfo();
+ }
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(InDirectory);
+ throw;
+ }
+
+ return fileExisted;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::RemoveDirectoryFromCache(int64_t)
+// Purpose: Remove directory from cache
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::RemoveDirectoryFromCache(int64_t ObjectID)
+{
+ std::map<int64_t, BackupStoreDirectory*>::iterator item(mDirectoryCache.find(ObjectID));
+ if(item != mDirectoryCache.end())
+ {
+ // Delete this cached object
+ delete item->second;
+		// Erase the entry from the map
+ mDirectoryCache.erase(item);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::SaveDirectory(BackupStoreDirectory &, int64_t)
+// Purpose: Save directory back to disc, update time in cache
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::SaveDirectory(BackupStoreDirectory &rDir, int64_t ObjectID)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(rDir.GetObjectID() != ObjectID)
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ try
+ {
+ // Write to disc, adjust size in store info
+ std::string dirfn;
+ MakeObjectFilename(ObjectID, dirfn);
+ {
+ RaidFileWrite writeDir(mStoreDiscSet, dirfn);
+ writeDir.Open(true /* allow overwriting */);
+
+ BufferedWriteStream buffer(writeDir);
+ rDir.WriteToStream(buffer);
+ buffer.Flush();
+
+			// get the disc usage (must do this before committing it)
+ int64_t dirSize = writeDir.GetDiscUsageInBlocks();
+
+ // Commit directory
+ writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+ // Make sure the size of the directory is available for writing the dir back
+ ASSERT(dirSize > 0);
+ int64_t sizeAdjustment = dirSize - rDir.GetUserInfo1_SizeInBlocks();
+ mapStoreInfo->ChangeBlocksUsed(sizeAdjustment);
+ mapStoreInfo->ChangeBlocksInDirectories(sizeAdjustment);
+ // Update size stored in directory
+ rDir.SetUserInfo1_SizeInBlocks(dirSize);
+ }
+ // Refresh revision ID in cache
+ {
+ int64_t revid = 0;
+ if(!RaidFileRead::FileExists(mStoreDiscSet, dirfn, &revid))
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+ rDir.SetRevisionID(revid);
+ }
+ }
+ catch(...)
+ {
+ // Remove it from the cache if anything went wrong
+ RemoveDirectoryFromCache(ObjectID);
+ throw;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::AddDirectory(int64_t,
+// const BackupStoreFilename &, bool &)
+// Purpose: Creates a directory (or just returns the ID of an
+//		existing one). rAlreadyExists set appropriately.
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+int64_t BackupStoreContext::AddDirectory(int64_t InDirectory, const BackupStoreFilename &rFilename, const StreamableMemBlock &Attributes, int64_t AttributesModTime, bool &rAlreadyExists)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Flags as not already existing
+ rAlreadyExists = false;
+
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Scan the directory for the name (only looking for directories which already exist)
+ {
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING,
+ BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)) != 0) // Ignore deleted and old directories
+ {
+ if(en->GetName() == rFilename)
+ {
+ // Already exists
+ rAlreadyExists = true;
+ return en->GetObjectID();
+ }
+ }
+ }
+
+ // Allocate the next ID
+ int64_t id = AllocateObjectID();
+
+ // Create an empty directory with the given attributes on disc
+ std::string fn;
+ MakeObjectFilename(id, fn, true /* make sure the directory it's in exists */);
+ {
+ BackupStoreDirectory emptyDir(id, InDirectory);
+		// add the attributes
+ emptyDir.SetAttributes(Attributes, AttributesModTime);
+
+ // Write...
+ RaidFileWrite dirFile(mStoreDiscSet, fn);
+ dirFile.Open(false /* no overwriting */);
+ emptyDir.WriteToStream(dirFile);
+		// Get disc usage, before it's committed
+ int64_t dirSize = dirFile.GetDiscUsageInBlocks();
+ // Commit the file
+ dirFile.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+ // Make sure the size of the directory is added to the usage counts in the info
+ ASSERT(dirSize > 0);
+ mapStoreInfo->ChangeBlocksUsed(dirSize);
+ mapStoreInfo->ChangeBlocksInDirectories(dirSize);
+ // Not added to cache, so don't set the size in the directory
+ }
+
+ // Then add it into the parent directory
+ try
+ {
+ dir.AddEntry(rFilename, 0 /* modification time */, id, 0 /* blocks used */, BackupStoreDirectory::Entry::Flags_Dir, 0 /* attributes mod time */);
+ SaveDirectory(dir, InDirectory);
+
+ // Increment reference count on the new directory to one
+ mapRefCount->AddReference(id);
+ }
+ catch(...)
+ {
+ // Back out on adding that directory
+ RaidFileWrite del(mStoreDiscSet, fn);
+ del.Delete();
+
+ // Remove this entry from the cache
+ RemoveDirectoryFromCache(InDirectory);
+
+ // Don't worry about the incremented number in the store info
+ throw;
+ }
+
+ // Save the store info (may not be postponed)
+ mapStoreInfo->AdjustNumDirectories(1);
+ SaveStoreInfo(false);
+
+ // tell caller what the ID was
+ return id;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreContext::DeleteDirectory(int64_t, bool)
+//		Purpose: Recursively deletes a directory (or undeletes it if Undelete = true)
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
+{
+ // Essential checks!
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Containing directory
+ int64_t InDirectory = 0;
+
+ // Count of blocks deleted
+ int64_t blocksDeleted = 0;
+
+ try
+ {
+ // Get the directory that's to be deleted
+ {
+ // In block, because dir may not be valid after the delete directory call
+ BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
+
+ // Store the directory it's in for later
+ InDirectory = dir.GetContainerID();
+
+ // Depth first delete of contents
+ DeleteDirectoryRecurse(ObjectID, blocksDeleted, Undelete);
+ }
+
+ // Remove the entry from the directory it's in
+ ASSERT(InDirectory != 0);
+ BackupStoreDirectory &parentDir(GetDirectoryInternal(InDirectory));
+
+ BackupStoreDirectory::Iterator i(parentDir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
+ Undelete?(0):(BackupStoreDirectory::Entry::Flags_Deleted))) != 0) // Ignore deleted directories (or not deleted if Undelete)
+ {
+ if(en->GetObjectID() == ObjectID)
+ {
+ // This is the one to delete
+ if(Undelete)
+ {
+ en->RemoveFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+ else
+ {
+ en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+
+ // Save it
+ SaveDirectory(parentDir, InDirectory);
+
+ // Done
+ break;
+ }
+ }
+
+ // Update blocks deleted count
+ mapStoreInfo->ChangeBlocksInDeletedFiles(Undelete?(0 - blocksDeleted):(blocksDeleted));
+ mapStoreInfo->AdjustNumDirectories(-1);
+ SaveStoreInfo(false);
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(InDirectory);
+ throw;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreContext::DeleteDirectoryRecurse(int64_t, int64_t &, bool)
+//		Purpose: Private. Deletes a directory depth-first recursively.
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBlocksDeletedOut, bool Undelete)
+{
+ try
+ {
+		// Takes care not to use a directory reference from the cache after a
+		// recursive call, because the entry may have been evicted.
+
+ // Do sub directories
+ {
+ // Get the directory...
+ BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
+
+ // Then scan it for directories
+ std::vector<int64_t> subDirs;
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ if(Undelete)
+ {
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir | BackupStoreDirectory::Entry::Flags_Deleted, // deleted dirs
+ BackupStoreDirectory::Entry::Flags_EXCLUDE_NOTHING)) != 0)
+ {
+ // Store the directory ID.
+ subDirs.push_back(en->GetObjectID());
+ }
+ }
+ else
+ {
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir, // dirs only
+ BackupStoreDirectory::Entry::Flags_Deleted)) != 0) // but not deleted ones
+ {
+ // Store the directory ID.
+ subDirs.push_back(en->GetObjectID());
+ }
+ }
+
+ // Done with the directory for now. Recurse to sub directories
+ for(std::vector<int64_t>::const_iterator i = subDirs.begin(); i != subDirs.end(); ++i)
+ {
+ DeleteDirectoryRecurse((*i), rBlocksDeletedOut, Undelete);
+ }
+ }
+
+ // Then, delete the files. Will need to load the directory again because it might have
+ // been removed from the cache.
+ {
+ // Get the directory...
+ BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
+
+ // Changes made?
+ bool changesMade = false;
+
+ // Run through files
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+
+ while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
+ Undelete?(0):(BackupStoreDirectory::Entry::Flags_Deleted))) != 0) // Ignore deleted directories (or not deleted if Undelete)
+ {
+ // Add/remove the deleted flags
+ if(Undelete)
+ {
+ en->RemoveFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+ else
+ {
+ en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+
+ // Keep count of the deleted blocks
+ if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
+ {
+ rBlocksDeletedOut += en->GetSizeInBlocks();
+ }
+
+ // Did something
+ changesMade = true;
+ }
+
+ // Save the directory
+ if(changesMade)
+ {
+ SaveDirectory(dir, ObjectID);
+ }
+ }
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(ObjectID);
+ throw;
+ }
+}
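DeleteDirectoryRecurse() deliberately copies the child directory IDs into a local vector before recursing, so no reference into the evictable directory cache is held across the recursive calls. A standalone sketch of that traversal order follows; the two helpers are hypothetical stand-ins for GetDirectoryInternal() and the entry iteration above, stubbed out so the sketch compiles on its own.

#include <stdint.h>
#include <vector>

// Hypothetical stand-ins for the directory and cache machinery above.
static std::vector<int64_t> ListSubdirIDs(int64_t DirID)
{
	return std::vector<int64_t>();
}
static void MarkEntriesDeleted(int64_t DirID)
{
}

static void DeleteRecurse(int64_t DirID)
{
	// Copy the child IDs out first...
	std::vector<int64_t> subDirs = ListSubdirIDs(DirID);

	// ...then recurse, holding no directory reference across the calls.
	for(std::vector<int64_t>::const_iterator i = subDirs.begin();
		i != subDirs.end(); ++i)
	{
		DeleteRecurse(*i);
	}

	// Only now touch this directory again to flag its entries.
	MarkEntriesDeleted(DirID);
}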
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::ChangeDirAttributes(int64_t, const StreamableMemBlock &, int64_t)
+// Purpose: Change the attributes of a directory
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::ChangeDirAttributes(int64_t Directory, const StreamableMemBlock &Attributes, int64_t AttributesModTime)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ try
+ {
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(Directory));
+
+ // Set attributes
+ dir.SetAttributes(Attributes, AttributesModTime);
+
+ // Save back
+ SaveDirectory(dir, Directory);
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(Directory);
+ throw;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &, int64_t, const StreamableMemBlock &, int64_t, int64_t &)
+// Purpose: Sets the attributes on a directory entry. Returns true if the object existed, false if it didn't.
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilename, int64_t InDirectory, const StreamableMemBlock &Attributes, int64_t AttributesHash, int64_t &rObjectIDOut)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ try
+ {
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Find the file entry
+ BackupStoreDirectory::Entry *en = 0;
+ // Iterate through current versions of files, only
+ BackupStoreDirectory::Iterator i(dir);
+ while((en = i.Next(
+ BackupStoreDirectory::Entry::Flags_File,
+ BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)
+ ) != 0)
+ {
+ if(en->GetName() == rFilename)
+ {
+ // Set attributes
+ en->SetAttributes(Attributes, AttributesHash);
+
+ // Tell caller the object ID
+ rObjectIDOut = en->GetObjectID();
+
+ // Done
+ break;
+ }
+ }
+ if(en == 0)
+ {
+ // Didn't find it
+ return false;
+ }
+
+ // Save back
+ SaveDirectory(dir, InDirectory);
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(InDirectory);
+ throw;
+ }
+
+ // Changed, everything OK
+ return true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::ObjectExists(int64_t)
+// Purpose: Test to see if an object of this ID exists in the store
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+	// Note that we need to allow object IDs a little bit greater than the last one in the store info,
+	// because the store info may not have been saved after an error. The largest possible gap is
+	// STORE_INFO_SAVE_DELAY, so allow twice that to be safe.
+ if(ObjectID <= 0 || ObjectID > (mapStoreInfo->GetLastObjectIDUsed() + (STORE_INFO_SAVE_DELAY * 2)))
+ {
+ // Obviously bad object ID
+ return false;
+ }
+
+ // Test to see if it exists on the disc
+ std::string filename;
+ MakeObjectFilename(ObjectID, filename);
+ if(!RaidFileRead::FileExists(mStoreDiscSet, filename))
+ {
+ // RaidFile reports no file there
+ return false;
+ }
+
+ // Do we need to be more specific?
+ if(MustBe != ObjectExists_Anything)
+ {
+ // Open the file
+ std::auto_ptr<RaidFileRead> objectFile(RaidFileRead::Open(mStoreDiscSet, filename));
+
+ // Read the first integer
+ u_int32_t magic;
+ if(!objectFile->ReadFullBuffer(&magic, sizeof(magic), 0 /* not interested in how many read if failure */))
+ {
+ // Failed to get any bytes, must have failed
+ return false;
+ }
+
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ if(MustBe == ObjectExists_File && ntohl(magic) == OBJECTMAGIC_FILE_MAGIC_VALUE_V0)
+ {
+ // Old version detected
+ return true;
+ }
+#endif
+
+ // Right one?
+ u_int32_t requiredMagic = (MustBe == ObjectExists_File)?OBJECTMAGIC_FILE_MAGIC_VALUE_V1:OBJECTMAGIC_DIR_MAGIC_VALUE;
+
+ // Check
+ if(ntohl(magic) != requiredMagic)
+ {
+ return false;
+ }
+
+ // File is implicitly closed
+ }
+
+ return true;
+}
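ObjectExists() distinguishes files from directories by the first 32-bit word of the object, stored in network byte order. Below is a small self-contained check of that kind against an in-memory buffer; the magic values here are stand-ins, the real ones are defined in BackupStoreObjectMagic.h.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	// ntohl()

// Stand-in magic values, NOT the real constants from BackupStoreObjectMagic.h.
static const uint32_t kFileMagic = 0x66696c65;
static const uint32_t kDirMagic  = 0x64697200;

// Returns true if the buffer begins with the expected magic value,
// read as a big-endian (network order) 32-bit integer.
static bool StartsWithMagic(const void *pBuffer, size_t Length, uint32_t Expected)
{
	if(Length < sizeof(uint32_t))
	{
		return false;
	}
	uint32_t magic;
	memcpy(&magic, pBuffer, sizeof(magic));
	return ntohl(magic) == Expected;
}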
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::OpenObject(int64_t)
+// Purpose: Opens an object
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<IOStream> BackupStoreContext::OpenObject(int64_t ObjectID)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ // Attempt to open the file
+ std::string fn;
+ MakeObjectFilename(ObjectID, fn);
+ return std::auto_ptr<IOStream>(RaidFileRead::Open(mStoreDiscSet, fn).release());
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::GetClientStoreMarker()
+// Purpose: Retrieve the client store marker
+// Created: 2003/10/29
+//
+// --------------------------------------------------------------------------
+int64_t BackupStoreContext::GetClientStoreMarker()
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ return mapStoreInfo->GetClientStoreMarker();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::GetStoreDiscUsageInfo(int64_t &, int64_t &, int64_t &)
+// Purpose: Get disc usage info from store info
+// Created: 1/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::GetStoreDiscUsageInfo(int64_t &rBlocksUsed, int64_t &rBlocksSoftLimit, int64_t &rBlocksHardLimit)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ rBlocksUsed = mapStoreInfo->GetBlocksUsed();
+ rBlocksSoftLimit = mapStoreInfo->GetBlocksSoftLimit();
+ rBlocksHardLimit = mapStoreInfo->GetBlocksHardLimit();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::HardLimitExceeded()
+// Purpose: Returns true if the hard limit has been exceeded
+// Created: 1/1/04
+//
+// --------------------------------------------------------------------------
+bool BackupStoreContext::HardLimitExceeded()
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ return mapStoreInfo->GetBlocksUsed() > mapStoreInfo->GetBlocksHardLimit();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::SetClientStoreMarker(int64_t)
+// Purpose: Sets the client store marker, and commits it to disc
+// Created: 2003/10/29
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::SetClientStoreMarker(int64_t ClientStoreMarker)
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ mapStoreInfo->SetClientStoreMarker(ClientStoreMarker);
+ SaveStoreInfo(false /* don't delay saving this */);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreContext::MoveObject(int64_t, int64_t, int64_t, const BackupStoreFilename &, bool, bool)
+// Purpose: Move an object (and all objects with the same name) from one directory to another
+// Created: 12/11/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory, int64_t MoveToDirectory, const BackupStoreFilename &rNewFilename, bool MoveAllWithSameName, bool AllowMoveOverDeletedObject)
+{
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+	// Should deleted files be excluded when checking for the existence of objects with the target name?
+ int64_t targetSearchExcludeFlags = (AllowMoveOverDeletedObject)
+ ?(BackupStoreDirectory::Entry::Flags_Deleted)
+ :(BackupStoreDirectory::Entry::Flags_EXCLUDE_NOTHING);
+
+ // Special case if the directories are the same...
+ if(MoveFromDirectory == MoveToDirectory)
+ {
+ try
+ {
+ // Get the first directory
+ BackupStoreDirectory &dir(GetDirectoryInternal(MoveFromDirectory));
+
+ // Find the file entry
+ BackupStoreDirectory::Entry *en = dir.FindEntryByID(ObjectID);
+
+ // Error if not found
+ if(en == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
+ }
+
+			// Check the new name doesn't already exist (optionally ignoring deleted files)
+ {
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING, targetSearchExcludeFlags)) != 0)
+ {
+ if(c->GetName() == rNewFilename)
+ {
+ THROW_EXCEPTION(BackupStoreException, NameAlreadyExistsInDirectory)
+ }
+ }
+ }
+
+ // Need to get all the entries with the same name?
+ if(MoveAllWithSameName)
+ {
+ // Iterate through the directory, copying all with matching names
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next()) != 0)
+ {
+ if(c->GetName() == en->GetName())
+ {
+ // Rename this one
+ c->SetName(rNewFilename);
+ }
+ }
+ }
+ else
+ {
+ // Just copy this one
+ en->SetName(rNewFilename);
+ }
+
+ // Save the directory back
+ SaveDirectory(dir, MoveFromDirectory);
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(MoveToDirectory); // either will do, as they're the same
+ throw;
+ }
+
+ return;
+ }
+
+	// Got to be careful how this is written, as we can't guarantee that if we have two
+	// directories open, the first won't be evicted from the cache as the second is opened.
+
+ // List of entries to move
+ std::vector<BackupStoreDirectory::Entry *> moving;
+
+ // list of directory IDs which need to have containing dir id changed
+ std::vector<int64_t> dirsToChangeContainingID;
+
+ try
+ {
+ // First of all, get copies of the entries to move to the to directory.
+
+ {
+ // Get the first directory
+ BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
+
+ // Find the file entry
+ BackupStoreDirectory::Entry *en = from.FindEntryByID(ObjectID);
+
+ // Error if not found
+ if(en == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
+ }
+
+ // Need to get all the entries with the same name?
+ if(MoveAllWithSameName)
+ {
+ // Iterate through the directory, copying all with matching names
+ BackupStoreDirectory::Iterator i(from);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next()) != 0)
+ {
+ if(c->GetName() == en->GetName())
+ {
+ // Copy
+ moving.push_back(new BackupStoreDirectory::Entry(*c));
+
+ // Check for containing directory correction
+ if(c->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(c->GetObjectID());
+ }
+ }
+ ASSERT(!moving.empty());
+ }
+ else
+ {
+ // Just copy this one
+ moving.push_back(new BackupStoreDirectory::Entry(*en));
+
+ // Check for containing directory correction
+ if(en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(en->GetObjectID());
+ }
+ }
+
+ // Secondly, insert them into the to directory, and save it
+
+ {
+ // To directory
+ BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
+
+			// Check the new name doesn't already exist
+ {
+ BackupStoreDirectory::Iterator i(to);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING, targetSearchExcludeFlags)) != 0)
+ {
+ if(c->GetName() == rNewFilename)
+ {
+ THROW_EXCEPTION(BackupStoreException, NameAlreadyExistsInDirectory)
+ }
+ }
+ }
+
+ // Copy the entries into it, changing the name as we go
+ for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
+ {
+ BackupStoreDirectory::Entry *en = (*i);
+ en->SetName(rNewFilename);
+ to.AddEntry(*en); // adds copy
+ }
+
+ // Save back
+ SaveDirectory(to, MoveToDirectory);
+ }
+
+ // Thirdly... remove them from the first directory -- but if it fails, attempt to delete them from the to directory
+ try
+ {
+ // Get directory
+ BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
+
+ // Delete each one
+ for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
+ {
+ from.DeleteEntry((*i)->GetObjectID());
+ }
+
+ // Save back
+ SaveDirectory(from, MoveFromDirectory);
+ }
+ catch(...)
+ {
+ // UNDO modification to To directory
+
+ // Get directory
+ BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
+
+ // Delete each one
+ for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
+ {
+ to.DeleteEntry((*i)->GetObjectID());
+ }
+
+ // Save back
+ SaveDirectory(to, MoveToDirectory);
+
+ // Throw the error
+ throw;
+ }
+
+ // Finally... for all the directories we moved, modify their containing directory ID
+ for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
+ {
+ // Load the directory
+ BackupStoreDirectory &change(GetDirectoryInternal(*i));
+
+ // Modify containing dir ID
+ change.SetContainerID(MoveToDirectory);
+
+ // Save it back
+ SaveDirectory(change, *i);
+ }
+ }
+ catch(...)
+ {
+ // Make sure directories aren't in the cache, as they may have been modified
+ RemoveDirectoryFromCache(MoveToDirectory);
+ RemoveDirectoryFromCache(MoveFromDirectory);
+ for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
+ {
+ RemoveDirectoryFromCache(*i);
+ }
+
+ while(!moving.empty())
+ {
+ delete moving.back();
+ moving.pop_back();
+ }
+ throw;
+ }
+
+ // Clean up
+ while(!moving.empty())
+ {
+ delete moving.back();
+ moving.pop_back();
+ }
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreContext::GetBackupStoreInfo()
+// Purpose: Return the backup store info object, exception if it isn't loaded
+// Created: 19/4/04
+//
+// --------------------------------------------------------------------------
+const BackupStoreInfo &BackupStoreContext::GetBackupStoreInfo() const
+{
+ if(mapStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ return *(mapStoreInfo.get());
+}
+
+
diff --git a/lib/backupstore/BackupStoreContext.h b/lib/backupstore/BackupStoreContext.h
new file mode 100644
index 00000000..c33e7d50
--- /dev/null
+++ b/lib/backupstore/BackupStoreContext.h
@@ -0,0 +1,211 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreContext.h
+// Purpose: Context for backup store server
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCONTEXT__H
+#define BACKUPCONTEXT__H
+
+#include <string>
+#include <map>
+#include <memory>
+
+#include "autogen_BackupProtocol.h"
+#include "BackupStoreInfo.h"
+#include "BackupStoreRefCountDatabase.h"
+#include "NamedLock.h"
+#include "Message.h"
+#include "Utils.h"
+
+class BackupStoreDirectory;
+class BackupStoreFilename;
+class IOStream;
+class BackupProtocolMessage;
+class StreamableMemBlock;
+
+class HousekeepingInterface
+{
+ public:
+ virtual ~HousekeepingInterface() { }
+ virtual void SendMessageToHousekeepingProcess(const void *Msg, int MsgLen) = 0;
+};
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupStoreContext
+// Purpose: Context for backup store server
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+class BackupStoreContext
+{
+public:
+ BackupStoreContext(int32_t ClientID, HousekeepingInterface &rDaemon,
+ const std::string& rConnectionDetails);
+ ~BackupStoreContext();
+private:
+ BackupStoreContext(const BackupStoreContext &rToCopy);
+public:
+
+ void ReceivedFinishCommand();
+ void CleanUp();
+
+ int32_t GetClientID() {return mClientID;}
+
+ enum
+ {
+ Phase_START = 0,
+ Phase_Version = 0,
+ Phase_Login = 1,
+ Phase_Commands = 2
+ };
+
+ int GetPhase() const {return mProtocolPhase;}
+ std::string GetPhaseName() const
+ {
+ switch(mProtocolPhase)
+ {
+ case Phase_Version: return "Phase_Version";
+ case Phase_Login: return "Phase_Login";
+ case Phase_Commands: return "Phase_Commands";
+ default:
+ std::ostringstream oss;
+ oss << "Unknown phase " << mProtocolPhase;
+ return oss.str();
+ }
+ }
+ void SetPhase(int NewPhase) {mProtocolPhase = NewPhase;}
+
+ // Read only locking
+ bool SessionIsReadOnly() {return mReadOnly;}
+ bool AttemptToGetWriteLock();
+
+ void SetClientHasAccount(const std::string &rStoreRoot, int StoreDiscSet) {mClientHasAccount = true; mStoreRoot = rStoreRoot; mStoreDiscSet = StoreDiscSet;}
+ bool GetClientHasAccount() const {return mClientHasAccount;}
+ const std::string &GetStoreRoot() const {return mStoreRoot;}
+ int GetStoreDiscSet() const {return mStoreDiscSet;}
+
+ // Store info
+ void LoadStoreInfo();
+ void SaveStoreInfo(bool AllowDelay = true);
+ const BackupStoreInfo &GetBackupStoreInfo() const;
+ const std::string GetAccountName()
+ {
+ if(!mapStoreInfo.get())
+ {
+ return "Unknown";
+ }
+ return mapStoreInfo->GetAccountName();
+ }
+
+ // Client marker
+ int64_t GetClientStoreMarker();
+ void SetClientStoreMarker(int64_t ClientStoreMarker);
+
+ // Usage information
+ void GetStoreDiscUsageInfo(int64_t &rBlocksUsed, int64_t &rBlocksSoftLimit, int64_t &rBlocksHardLimit);
+ bool HardLimitExceeded();
+
+ // Reading directories
+ // --------------------------------------------------------------------------
+ //
+ // Function
+ // Name: BackupStoreContext::GetDirectory(int64_t)
+ // Purpose: Return a reference to a directory. Valid only until the
+ // next time a function which affects directories is called.
+	//		Mainly this function, and creation of files.
+ // Created: 2003/09/02
+ //
+ // --------------------------------------------------------------------------
+ const BackupStoreDirectory &GetDirectory(int64_t ObjectID)
+ {
+ // External callers aren't allowed to change it -- this function
+		// merely turns the returned directory const.
+ return GetDirectoryInternal(ObjectID);
+ }
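+
+	// Usage sketch (illustrative only; "context" and "dirID" are assumed
+	// caller-side names). Because the reference is only valid until the next
+	// directory-affecting call, copy out anything needed before making one:
+	//
+	//     const BackupStoreDirectory &dir(context.GetDirectory(dirID));
+	//     int64_t containerID = dir.GetContainerID();	// copied out: safe
+	//     // only now call AddFile(), AddDirectory(), etc.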
+
+ // Manipulating files/directories
+ int64_t AddFile(IOStream &rFile, int64_t InDirectory, int64_t ModificationTime, int64_t AttributesHash, int64_t DiffFromFileID, const BackupStoreFilename &rFilename, bool MarkFileWithSameNameAsOldVersions);
+ int64_t AddDirectory(int64_t InDirectory, const BackupStoreFilename &rFilename, const StreamableMemBlock &Attributes, int64_t AttributesModTime, bool &rAlreadyExists);
+ void ChangeDirAttributes(int64_t Directory, const StreamableMemBlock &Attributes, int64_t AttributesModTime);
+ bool ChangeFileAttributes(const BackupStoreFilename &rFilename, int64_t InDirectory, const StreamableMemBlock &Attributes, int64_t AttributesHash, int64_t &rObjectIDOut);
+ bool DeleteFile(const BackupStoreFilename &rFilename, int64_t InDirectory, int64_t &rObjectIDOut);
+ bool UndeleteFile(int64_t ObjectID, int64_t InDirectory);
+ void DeleteDirectory(int64_t ObjectID, bool Undelete = false);
+ void MoveObject(int64_t ObjectID, int64_t MoveFromDirectory, int64_t MoveToDirectory, const BackupStoreFilename &rNewFilename, bool MoveAllWithSameName, bool AllowMoveOverDeletedObject);
+
+ // Manipulating objects
+ enum
+ {
+ ObjectExists_Anything = 0,
+ ObjectExists_File = 1,
+ ObjectExists_Directory = 2
+ };
+ bool ObjectExists(int64_t ObjectID, int MustBe = ObjectExists_Anything);
+ std::auto_ptr<IOStream> OpenObject(int64_t ObjectID);
+
+ // Info
+ int32_t GetClientID() const {return mClientID;}
+ const std::string& GetConnectionDetails() { return mConnectionDetails; }
+
+private:
+ void MakeObjectFilename(int64_t ObjectID, std::string &rOutput, bool EnsureDirectoryExists = false);
+ BackupStoreDirectory &GetDirectoryInternal(int64_t ObjectID);
+ void SaveDirectory(BackupStoreDirectory &rDir, int64_t ObjectID);
+ void RemoveDirectoryFromCache(int64_t ObjectID);
+ void DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBlocksDeletedOut, bool Undelete);
+ int64_t AllocateObjectID();
+
+ std::string mConnectionDetails;
+ int32_t mClientID;
+ HousekeepingInterface &mrDaemon;
+ int mProtocolPhase;
+ bool mClientHasAccount;
+ std::string mStoreRoot; // has final directory separator
+ int mStoreDiscSet;
+ bool mReadOnly;
+ NamedLock mWriteLock;
+ int mSaveStoreInfoDelay; // how many times to delay saving the store info
+
+ // Store info
+ std::auto_ptr<BackupStoreInfo> mapStoreInfo;
+
+ // Refcount database
+ std::auto_ptr<BackupStoreRefCountDatabase> mapRefCount;
+
+ // Directory cache
+ std::map<int64_t, BackupStoreDirectory*> mDirectoryCache;
+
+public:
+ class TestHook
+ {
+ public:
+ virtual std::auto_ptr<BackupProtocolMessage>
+ StartCommand(const BackupProtocolMessage& rCommand) = 0;
+ virtual ~TestHook() { }
+ };
+ void SetTestHook(TestHook& rTestHook)
+ {
+ mpTestHook = &rTestHook;
+ }
+ std::auto_ptr<BackupProtocolMessage>
+ StartCommandHook(const BackupProtocolMessage& rCommand)
+ {
+ if(mpTestHook)
+ {
+ return mpTestHook->StartCommand(rCommand);
+ }
+ return std::auto_ptr<BackupProtocolMessage>();
+ }
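+
+	// Sketch of a test hook (illustrative only): a test can intercept
+	// commands by subclassing TestHook and registering it with SetTestHook().
+	// Returning an empty auto_ptr lets the command execute normally:
+	//
+	//     class NullHook : public BackupStoreContext::TestHook
+	//     {
+	//     public:
+	//         virtual std::auto_ptr<BackupProtocolMessage>
+	//         StartCommand(const BackupProtocolMessage& rCommand)
+	//         {
+	//             return std::auto_ptr<BackupProtocolMessage>();
+	//         }
+	//     };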
+
+private:
+ TestHook* mpTestHook;
+};
+
+#endif // BACKUPCONTEXT__H
+
diff --git a/lib/backupstore/BackupStoreDirectory.cpp b/lib/backupstore/BackupStoreDirectory.cpp
new file mode 100644
index 00000000..81126ede
--- /dev/null
+++ b/lib/backupstore/BackupStoreDirectory.cpp
@@ -0,0 +1,578 @@
+// --------------------------------------------------------------------------
+//
+// File
+//		Name:    BackupStoreDirectory.cpp
+// Purpose: Representation of a backup directory
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <sys/types.h>
+
+#include "BackupStoreDirectory.h"
+#include "IOStream.h"
+#include "BackupStoreException.h"
+#include "BackupStoreObjectMagic.h"
+
+#include "MemLeakFindOn.h"
+
+// set packing to one byte
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "BeginStructPackForWire.h"
+#else
+BEGIN_STRUCTURE_PACKING_FOR_WIRE
+#endif
+
+typedef struct
+{
+ int32_t mMagicValue; // also the version number
+ int32_t mNumEntries;
+ int64_t mObjectID; // this object ID
+ int64_t mContainerID; // ID of container
+ uint64_t mAttributesModTime;
+ int32_t mOptionsPresent; // bit mask of optional sections / features present
+ // Then a StreamableMemBlock for attributes
+} dir_StreamFormat;
+
+typedef enum
+{
+ Option_DependencyInfoPresent = 1
+} dir_StreamFormatOptions;
+
+typedef struct
+{
+ uint64_t mModificationTime;
+ int64_t mObjectID;
+ int64_t mSizeInBlocks;
+ uint64_t mAttributesHash;
+ int16_t mFlags; // order smaller items after bigger ones (for alignment)
+ // Then a BackupStoreFilename
+ // Then a StreamableMemBlock for attributes
+} en_StreamFormat;
+
+typedef struct
+{
+ int64_t mDependsNewer;
+ int64_t mDependsOlder;
+} en_StreamFormatDepends;
+
+// Use default packing
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "EndStructPackForWire.h"
+#else
+END_STRUCTURE_PACKING_FOR_WIRE
+#endif
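+
+// Overall on-stream layout implied by the structures above (informal sketch;
+// all integer fields are in network byte order):
+//
+//     dir_StreamFormat header
+//     StreamableMemBlock              directory attributes (possibly empty)
+//     mNumEntries x:
+//         en_StreamFormat entry header
+//         BackupStoreFilename         entry name
+//         StreamableMemBlock          entry attributes (possibly empty)
+//     if Option_DependencyInfoPresent is set in mOptionsPresent:
+//         mNumEntries x en_StreamFormatDepends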
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::BackupStoreDirectory()
+// Purpose: Constructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::BackupStoreDirectory()
+ : mRevisionID(0), mObjectID(0), mContainerID(0), mAttributesModTime(0), mUserInfo1(0)
+{
+ ASSERT(sizeof(u_int64_t) == sizeof(box_time_t));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::BackupStoreDirectory(int64_t, int64_t)
+// Purpose: Constructor giving object and container IDs
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::BackupStoreDirectory(int64_t ObjectID, int64_t ContainerID)
+ : mRevisionID(0), mObjectID(ObjectID), mContainerID(ContainerID), mAttributesModTime(0), mUserInfo1(0)
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::~BackupStoreDirectory()
+// Purpose: Destructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::~BackupStoreDirectory()
+{
+ for(std::vector<Entry*>::iterator i(mEntries.begin()); i != mEntries.end(); ++i)
+ {
+ delete (*i);
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::ReadFromStream(IOStream &, int)
+// Purpose: Reads the directory contents from a stream.
+// Exceptions will result in incomplete reads.
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreDirectory::ReadFromStream(IOStream &rStream, int Timeout)
+{
+ // Get the header
+ dir_StreamFormat hdr;
+ if(!rStream.ReadFullBuffer(&hdr, sizeof(hdr), 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Check magic value...
+ if(OBJECTMAGIC_DIR_MAGIC_VALUE != ntohl(hdr.mMagicValue))
+ {
+ THROW_EXCEPTION_MESSAGE(BackupStoreException, BadDirectoryFormat,
+ "Wrong magic number in directory object " <<
+ BOX_FORMAT_OBJECTID(mObjectID) << ": expected " <<
+ BOX_FORMAT_HEX32(OBJECTMAGIC_DIR_MAGIC_VALUE) <<
+ " but found " <<
+ BOX_FORMAT_HEX32(ntohl(hdr.mMagicValue)));
+ }
+
+ // Get data
+ mObjectID = box_ntoh64(hdr.mObjectID);
+ mContainerID = box_ntoh64(hdr.mContainerID);
+ mAttributesModTime = box_ntoh64(hdr.mAttributesModTime);
+
+ // Options
+ int32_t options = ntohl(hdr.mOptionsPresent);
+
+ // Get attributes
+ mAttributes.ReadFromStream(rStream, Timeout);
+
+ // Decode count
+ int count = ntohl(hdr.mNumEntries);
+
+ // Clear existing list
+ for(std::vector<Entry*>::iterator i = mEntries.begin();
+ i != mEntries.end(); i++)
+ {
+ delete (*i);
+ }
+ mEntries.clear();
+
+ // Read them in!
+ for(int c = 0; c < count; ++c)
+ {
+ Entry *pen = new Entry;
+ try
+ {
+ // Read from stream
+ pen->ReadFromStream(rStream, Timeout);
+
+ // Add to list
+ mEntries.push_back(pen);
+ }
+ catch(...)
+ {
+ delete pen;
+ throw;
+ }
+ }
+
+ // Read in dependency info?
+ if(options & Option_DependencyInfoPresent)
+ {
+ // Read in extra dependency data
+ for(int c = 0; c < count; ++c)
+ {
+ mEntries[c]->ReadFromStreamDependencyInfo(rStream, Timeout);
+ }
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::WriteToStream(IOStream &, int16_t, int16_t, bool, bool)
+// Purpose: Writes a selection of entries to a stream
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreDirectory::WriteToStream(IOStream &rStream, int16_t FlagsMustBeSet, int16_t FlagsNotToBeSet, bool StreamAttributes, bool StreamDependencyInfo) const
+{
+ // Get count of entries
+ int32_t count = mEntries.size();
+ if(FlagsMustBeSet != Entry::Flags_INCLUDE_EVERYTHING || FlagsNotToBeSet != Entry::Flags_EXCLUDE_NOTHING)
+ {
+ // Need to count the entries
+ count = 0;
+ Iterator i(*this);
+ while(i.Next(FlagsMustBeSet, FlagsNotToBeSet) != 0)
+ {
+ count++;
+ }
+ }
+
+ // Check that sensible IDs have been set
+ ASSERT(mObjectID != 0);
+ ASSERT(mContainerID != 0);
+
+ // Need dependency info?
+ bool dependencyInfoRequired = false;
+ if(StreamDependencyInfo)
+ {
+ Iterator i(*this);
+ Entry *pen = 0;
+ while((pen = i.Next(FlagsMustBeSet, FlagsNotToBeSet)) != 0)
+ {
+ if(pen->HasDependencies())
+ {
+ dependencyInfoRequired = true;
+ }
+ }
+ }
+
+ // Options
+ int32_t options = 0;
+ if(dependencyInfoRequired) options |= Option_DependencyInfoPresent;
+
+ // Build header
+ dir_StreamFormat hdr;
+ hdr.mMagicValue = htonl(OBJECTMAGIC_DIR_MAGIC_VALUE);
+ hdr.mNumEntries = htonl(count);
+ hdr.mObjectID = box_hton64(mObjectID);
+ hdr.mContainerID = box_hton64(mContainerID);
+ hdr.mAttributesModTime = box_hton64(mAttributesModTime);
+ hdr.mOptionsPresent = htonl(options);
+
+ // Write header
+ rStream.Write(&hdr, sizeof(hdr));
+
+ // Write the attributes?
+ if(StreamAttributes)
+ {
+ mAttributes.WriteToStream(rStream);
+ }
+ else
+ {
+ // Write a blank header instead
+ StreamableMemBlock::WriteEmptyBlockToStream(rStream);
+ }
+
+ // Then write all the entries
+ Iterator i(*this);
+ Entry *pen = 0;
+ while((pen = i.Next(FlagsMustBeSet, FlagsNotToBeSet)) != 0)
+ {
+ pen->WriteToStream(rStream);
+ }
+
+ // Write dependency info?
+ if(dependencyInfoRequired)
+ {
+ Iterator i(*this);
+ Entry *pen = 0;
+ while((pen = i.Next(FlagsMustBeSet, FlagsNotToBeSet)) != 0)
+ {
+ pen->WriteToStreamDependencyInfo(rStream);
+ }
+ }
+}
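+
+// Usage sketch (illustrative only; "dir" and "stream" are assumed caller-side
+// names): write only entries which are current files, excluding deleted and
+// old versions, keeping attributes and dependency info:
+//
+//     dir.WriteToStream(stream,
+//         BackupStoreDirectory::Entry::Flags_File,		// must be set
+//         BackupStoreDirectory::Entry::Flags_Deleted |
+//             BackupStoreDirectory::Entry::Flags_OldVersion,	// must not be set
+//         true /* StreamAttributes */, true /* StreamDependencyInfo */);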
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::AddEntry(const Entry &)
+// Purpose: Adds entry to directory (no checking)
+// Created: 2003/08/27
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::Entry *BackupStoreDirectory::AddEntry(const Entry &rEntryToCopy)
+{
+ Entry *pnew = new Entry(rEntryToCopy);
+ try
+ {
+ mEntries.push_back(pnew);
+ }
+ catch(...)
+ {
+ delete pnew;
+ throw;
+ }
+
+ return pnew;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreDirectory::AddEntry(const BackupStoreFilename &, box_time_t, int64_t, int64_t, int16_t, uint64_t)
+// Purpose: Adds entry to directory (no checking)
+// Created: 2003/08/27
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::Entry *
+BackupStoreDirectory::AddEntry(const BackupStoreFilename &rName,
+ box_time_t ModificationTime, int64_t ObjectID, int64_t SizeInBlocks,
+ int16_t Flags, uint64_t AttributesHash)
+{
+ Entry *pnew = new Entry(rName, ModificationTime, ObjectID,
+ SizeInBlocks, Flags, AttributesHash);
+ try
+ {
+ mEntries.push_back(pnew);
+ }
+ catch(...)
+ {
+ delete pnew;
+ throw;
+ }
+
+ return pnew;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::DeleteEntry(int64_t)
+// Purpose: Deletes entry with given object ID (uses linear search, maybe a little inefficient)
+// Created: 2003/08/27
+//
+// --------------------------------------------------------------------------
+void BackupStoreDirectory::DeleteEntry(int64_t ObjectID)
+{
+ for(std::vector<Entry*>::iterator i(mEntries.begin());
+ i != mEntries.end(); ++i)
+ {
+ if((*i)->mObjectID == ObjectID)
+ {
+ // Delete
+ delete (*i);
+ // Remove from list
+ mEntries.erase(i);
+ // Done
+ return;
+ }
+ }
+
+ // Not found
+ THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::FindEntryByID(int64_t)
+// Purpose: Finds a specific entry. Returns 0 if the entry doesn't exist.
+// Created: 12/11/03
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::Entry *BackupStoreDirectory::FindEntryByID(int64_t ObjectID) const
+{
+ for(std::vector<Entry*>::const_iterator i(mEntries.begin());
+ i != mEntries.end(); ++i)
+ {
+ if((*i)->mObjectID == ObjectID)
+ {
+ // Found
+ return (*i);
+ }
+ }
+
+ // Not found
+ return 0;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::Entry::Entry()
+// Purpose: Constructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::Entry::Entry()
+ : mModificationTime(0),
+ mObjectID(0),
+ mSizeInBlocks(0),
+ mFlags(0),
+ mAttributesHash(0),
+ mMinMarkNumber(0),
+ mMarkNumber(0),
+ mDependsNewer(0),
+ mDependsOlder(0)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::Entry::~Entry()
+// Purpose: Destructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::Entry::~Entry()
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::Entry::Entry(const Entry &)
+// Purpose: Copy constructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::Entry::Entry(const Entry &rToCopy)
+ : mName(rToCopy.mName),
+ mModificationTime(rToCopy.mModificationTime),
+ mObjectID(rToCopy.mObjectID),
+ mSizeInBlocks(rToCopy.mSizeInBlocks),
+ mFlags(rToCopy.mFlags),
+ mAttributesHash(rToCopy.mAttributesHash),
+ mAttributes(rToCopy.mAttributes),
+ mMinMarkNumber(rToCopy.mMinMarkNumber),
+ mMarkNumber(rToCopy.mMarkNumber),
+ mDependsNewer(rToCopy.mDependsNewer),
+ mDependsOlder(rToCopy.mDependsOlder)
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreDirectory::Entry::Entry(const BackupStoreFilename &, box_time_t, int64_t, int64_t, int16_t, uint64_t)
+// Purpose: Constructor from values
+// Created: 2003/08/27
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory::Entry::Entry(const BackupStoreFilename &rName, box_time_t ModificationTime, int64_t ObjectID, int64_t SizeInBlocks, int16_t Flags, uint64_t AttributesHash)
+ : mName(rName),
+ mModificationTime(ModificationTime),
+ mObjectID(ObjectID),
+ mSizeInBlocks(SizeInBlocks),
+ mFlags(Flags),
+ mAttributesHash(AttributesHash),
+ mMinMarkNumber(0),
+ mMarkNumber(0),
+ mDependsNewer(0),
+ mDependsOlder(0)
+{
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreDirectory::Entry::ReadFromStream(IOStream &, int)
+// Purpose: Read an entry from a stream
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreDirectory::Entry::ReadFromStream(IOStream &rStream, int Timeout)
+{
+ // Grab the raw bytes from the stream which compose the header
+ en_StreamFormat entry;
+ if(!rStream.ReadFullBuffer(&entry, sizeof(entry), 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Do reading first before modifying the variables, to be more exception safe
+
+ // Get the filename
+ BackupStoreFilename name;
+ name.ReadFromStream(rStream, Timeout);
+
+ // Get the attributes
+ mAttributes.ReadFromStream(rStream, Timeout);
+
+ // Store the rest of the bits
+ mModificationTime = box_ntoh64(entry.mModificationTime);
+ mObjectID = box_ntoh64(entry.mObjectID);
+ mSizeInBlocks = box_ntoh64(entry.mSizeInBlocks);
+ mAttributesHash = box_ntoh64(entry.mAttributesHash);
+ mFlags = ntohs(entry.mFlags);
+ mName = name;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::Entry::WriteToStream(IOStream &)
+// Purpose: Writes the entry to a stream
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreDirectory::Entry::WriteToStream(IOStream &rStream) const
+{
+ // Build a structure
+ en_StreamFormat entry;
+ entry.mModificationTime = box_hton64(mModificationTime);
+ entry.mObjectID = box_hton64(mObjectID);
+ entry.mSizeInBlocks = box_hton64(mSizeInBlocks);
+ entry.mAttributesHash = box_hton64(mAttributesHash);
+ entry.mFlags = htons(mFlags);
+
+ // Write it
+ rStream.Write(&entry, sizeof(entry));
+
+ // Write the filename
+ mName.WriteToStream(rStream);
+
+ // Write any attributes
+ mAttributes.WriteToStream(rStream);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::Entry::ReadFromStreamDependencyInfo(IOStream &, int)
+// Purpose: Read the optional dependency info from a stream
+// Created: 13/7/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreDirectory::Entry::ReadFromStreamDependencyInfo(IOStream &rStream, int Timeout)
+{
+ // Grab the raw bytes from the stream which compose the header
+ en_StreamFormatDepends depends;
+ if(!rStream.ReadFullBuffer(&depends, sizeof(depends), 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Store the data
+ mDependsNewer = box_ntoh64(depends.mDependsNewer);
+ mDependsOlder = box_ntoh64(depends.mDependsOlder);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDirectory::Entry::WriteToStreamDependencyInfo(IOStream &)
+// Purpose: Write the optional dependency info to a stream
+// Created: 13/7/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreDirectory::Entry::WriteToStreamDependencyInfo(IOStream &rStream) const
+{
+ // Build structure
+ en_StreamFormatDepends depends;
+ depends.mDependsNewer = box_hton64(mDependsNewer);
+ depends.mDependsOlder = box_hton64(mDependsOlder);
+ // Write
+ rStream.Write(&depends, sizeof(depends));
+}
+
+
+
diff --git a/lib/backupstore/BackupStoreDirectory.h b/lib/backupstore/BackupStoreDirectory.h
new file mode 100644
index 00000000..1348f4e6
--- /dev/null
+++ b/lib/backupstore/BackupStoreDirectory.h
@@ -0,0 +1,289 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreDirectory.h
+// Purpose: Representation of a backup directory
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREDIRECTORY__H
+#define BACKUPSTOREDIRECTORY__H
+
+#include <string>
+#include <vector>
+
+#include "BackupStoreFilenameClear.h"
+#include "StreamableMemBlock.h"
+#include "BoxTime.h"
+
+class IOStream;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupStoreDirectory
+// Purpose: In memory representation of a directory
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+class BackupStoreDirectory
+{
+public:
+ BackupStoreDirectory();
+ BackupStoreDirectory(int64_t ObjectID, int64_t ContainerID);
+private:
+ // Copying not allowed
+ BackupStoreDirectory(const BackupStoreDirectory &rToCopy);
+public:
+ ~BackupStoreDirectory();
+
+ class Entry
+ {
+ public:
+ friend class BackupStoreDirectory;
+
+ Entry();
+ ~Entry();
+ Entry(const Entry &rToCopy);
+ Entry(const BackupStoreFilename &rName, box_time_t ModificationTime, int64_t ObjectID, int64_t SizeInBlocks, int16_t Flags, uint64_t AttributesHash);
+
+ void ReadFromStream(IOStream &rStream, int Timeout);
+ void WriteToStream(IOStream &rStream) const;
+
+ const BackupStoreFilename &GetName() const {return mName;}
+ box_time_t GetModificationTime() const {return mModificationTime;}
+ int64_t GetObjectID() const {return mObjectID;}
+ int64_t GetSizeInBlocks() const {return mSizeInBlocks;}
+ int16_t GetFlags() const {return mFlags;}
+ void AddFlags(int16_t Flags) {mFlags |= Flags;}
+ void RemoveFlags(int16_t Flags) {mFlags &= ~Flags;}
+
+ // Some things can be changed
+ void SetName(const BackupStoreFilename &rNewName) {mName = rNewName;}
+ void SetSizeInBlocks(int64_t SizeInBlocks) {mSizeInBlocks = SizeInBlocks;}
+
+ // Attributes
+ bool HasAttributes() const {return !mAttributes.IsEmpty();}
+ void SetAttributes(const StreamableMemBlock &rAttr, uint64_t AttributesHash) {mAttributes.Set(rAttr); mAttributesHash = AttributesHash;}
+ const StreamableMemBlock &GetAttributes() const {return mAttributes;}
+ uint64_t GetAttributesHash() const {return mAttributesHash;}
+
+ // Marks
+ // The lowest mark number a version of a file of this name has ever had
+ uint32_t GetMinMarkNumber() const {return mMinMarkNumber;}
+ // The mark number on this file
+ uint32_t GetMarkNumber() const {return mMarkNumber;}
+
+		// Make sure these flags are synced with those in backupprotocol.txt
+ // ListDirectory command
+ enum
+ {
+ Flags_INCLUDE_EVERYTHING = -1,
+ Flags_EXCLUDE_NOTHING = 0,
+			Flags_EXCLUDE_EVERYTHING = 31,	// make sure this is kept as the sum of all the flag values below!
+ Flags_File = 1,
+ Flags_Dir = 2,
+ Flags_Deleted = 4,
+ Flags_OldVersion = 8,
+			Flags_RemoveASAP = 16	// if this flag is set, housekeeping will remove it as soon as it is marked Deleted or OldVersion
+ };
+ // characters for textual listing of files -- see bbackupquery/BackupQueries
+ #define BACKUPSTOREDIRECTORY_ENTRY_FLAGS_DISPLAY_NAMES "fdXoR"
+
+ // convenience methods
+ bool inline IsDir()
+ {
+ return GetFlags() & Flags_Dir;
+ }
+ bool inline IsFile()
+ {
+ return GetFlags() & Flags_File;
+ }
+ bool inline IsOld()
+ {
+ return GetFlags() & Flags_OldVersion;
+ }
+ bool inline IsDeleted()
+ {
+ return GetFlags() & Flags_Deleted;
+ }
+ bool inline MatchesFlags(int16_t FlagsMustBeSet, int16_t FlagsNotToBeSet)
+ {
+ return ((FlagsMustBeSet == Flags_INCLUDE_EVERYTHING) || ((mFlags & FlagsMustBeSet) == FlagsMustBeSet))
+ && ((mFlags & FlagsNotToBeSet) == 0);
+ };
+
+ // Get dependency info
+ // new version this depends on
+ int64_t GetDependsNewer() const {return mDependsNewer;}
+ void SetDependsNewer(int64_t ObjectID) {mDependsNewer = ObjectID;}
+ // older version which depends on this
+ int64_t GetDependsOlder() const {return mDependsOlder;}
+ void SetDependsOlder(int64_t ObjectID) {mDependsOlder = ObjectID;}
+
+ // Dependency info saving
+ bool HasDependencies() {return mDependsNewer != 0 || mDependsOlder != 0;}
+ void ReadFromStreamDependencyInfo(IOStream &rStream, int Timeout);
+ void WriteToStreamDependencyInfo(IOStream &rStream) const;
+
+ private:
+ BackupStoreFilename mName;
+ box_time_t mModificationTime;
+ int64_t mObjectID;
+ int64_t mSizeInBlocks;
+ int16_t mFlags;
+ uint64_t mAttributesHash;
+ StreamableMemBlock mAttributes;
+ uint32_t mMinMarkNumber;
+ uint32_t mMarkNumber;
+
+ uint64_t mDependsNewer; // new version this depends on
+ uint64_t mDependsOlder; // older version which depends on this
+ };
+
+ void ReadFromStream(IOStream &rStream, int Timeout);
+ void WriteToStream(IOStream &rStream,
+ int16_t FlagsMustBeSet = Entry::Flags_INCLUDE_EVERYTHING,
+ int16_t FlagsNotToBeSet = Entry::Flags_EXCLUDE_NOTHING,
+ bool StreamAttributes = true, bool StreamDependencyInfo = true) const;
+
+ Entry *AddEntry(const Entry &rEntryToCopy);
+ Entry *AddEntry(const BackupStoreFilename &rName,
+ box_time_t ModificationTime, int64_t ObjectID,
+ int64_t SizeInBlocks, int16_t Flags,
+ uint64_t AttributesHash);
+ void DeleteEntry(int64_t ObjectID);
+ Entry *FindEntryByID(int64_t ObjectID) const;
+
+ int64_t GetObjectID() const {return mObjectID;}
+ int64_t GetContainerID() const {return mContainerID;}
+
+ // Need to be able to update the container ID when moving objects
+ void SetContainerID(int64_t ContainerID) {mContainerID = ContainerID;}
+
+ // Purely for use of server -- not serialised into streams
+ int64_t GetRevisionID() const {return mRevisionID;}
+ void SetRevisionID(int64_t RevisionID) {mRevisionID = RevisionID;}
+
+ unsigned int GetNumberOfEntries() const {return mEntries.size();}
+
+ // User info -- not serialised into streams
+ int64_t GetUserInfo1_SizeInBlocks() const {return mUserInfo1;}
+ void SetUserInfo1_SizeInBlocks(int64_t UserInfo1) {mUserInfo1 = UserInfo1;}
+
+ // Attributes
+ bool HasAttributes() const {return !mAttributes.IsEmpty();}
+ void SetAttributes(const StreamableMemBlock &rAttr, box_time_t AttributesModTime) {mAttributes.Set(rAttr); mAttributesModTime = AttributesModTime;}
+ const StreamableMemBlock &GetAttributes() const {return mAttributes;}
+ box_time_t GetAttributesModTime() const {return mAttributesModTime;}
+
+ class Iterator
+ {
+ public:
+ Iterator(const BackupStoreDirectory &rDir)
+ : mrDir(rDir), i(rDir.mEntries.begin())
+ {
+ }
+
+ BackupStoreDirectory::Entry *Next(int16_t FlagsMustBeSet = Entry::Flags_INCLUDE_EVERYTHING, int16_t FlagsNotToBeSet = Entry::Flags_EXCLUDE_NOTHING)
+ {
+ // Skip over things which don't match the required flags
+ while(i != mrDir.mEntries.end() && !(*i)->MatchesFlags(FlagsMustBeSet, FlagsNotToBeSet))
+ {
+ ++i;
+ }
+ // Not the last one?
+ if(i == mrDir.mEntries.end())
+ {
+ return 0;
+ }
+ // Return entry, and increment
+ return (*(i++));
+ }
+
+ // WARNING: This function is really very inefficient.
+ // Only use when you want to look up ONE filename, not in a loop looking up lots.
+ // In a looping situation, cache the decrypted filenames in another memory structure.
+ BackupStoreDirectory::Entry *FindMatchingClearName(const BackupStoreFilenameClear &rFilename, int16_t FlagsMustBeSet = Entry::Flags_INCLUDE_EVERYTHING, int16_t FlagsNotToBeSet = Entry::Flags_EXCLUDE_NOTHING)
+ {
+ // Skip over things which don't match the required flags or filename
+ while( (i != mrDir.mEntries.end())
+ && ( (!(*i)->MatchesFlags(FlagsMustBeSet, FlagsNotToBeSet))
+ || (BackupStoreFilenameClear((*i)->GetName()).GetClearFilename() != rFilename.GetClearFilename()) ) )
+ {
+ ++i;
+ }
+ // Not the last one?
+ if(i == mrDir.mEntries.end())
+ {
+ return 0;
+ }
+ // Return entry, and increment
+ return (*(i++));
+ }
+
+ private:
+ const BackupStoreDirectory &mrDir;
+ std::vector<Entry*>::const_iterator i;
+ };
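+
+	// Usage sketch (illustrative only; "dir" is an assumed caller-side
+	// name): enumerate the current, non-deleted files in a directory.
+	// Note that Next() both filters on the flags and advances the iterator:
+	//
+	//     BackupStoreDirectory::Iterator i(dir);
+	//     BackupStoreDirectory::Entry *pEntry = 0;
+	//     while((pEntry = i.Next(BackupStoreDirectory::Entry::Flags_File,
+	//         BackupStoreDirectory::Entry::Flags_Deleted)) != 0)
+	//     {
+	//         // use pEntry->GetObjectID(), pEntry->GetName(), ...
+	//     }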
+
+ friend class Iterator;
+
+ class ReverseIterator
+ {
+ public:
+ ReverseIterator(const BackupStoreDirectory &rDir)
+ : mrDir(rDir), i(rDir.mEntries.rbegin())
+ {
+ }
+
+ BackupStoreDirectory::Entry *Next(int16_t FlagsMustBeSet = Entry::Flags_INCLUDE_EVERYTHING, int16_t FlagsNotToBeSet = Entry::Flags_EXCLUDE_NOTHING)
+ {
+ // Skip over things which don't match the required flags
+ while(i != mrDir.mEntries.rend() && !(*i)->MatchesFlags(FlagsMustBeSet, FlagsNotToBeSet))
+ {
+ ++i;
+ }
+ // Not the last one?
+ if(i == mrDir.mEntries.rend())
+ {
+ return 0;
+ }
+ // Return entry, and increment
+ return (*(i++));
+ }
+
+ private:
+ const BackupStoreDirectory &mrDir;
+ std::vector<Entry*>::const_reverse_iterator i;
+ };
+
+ friend class ReverseIterator;
+
+ // For recovery of the store
+ // Implemented in BackupStoreCheck2.cpp
+ bool CheckAndFix();
+ void AddUnattachedObject(const BackupStoreFilename &rName,
+ box_time_t ModificationTime, int64_t ObjectID,
+ int64_t SizeInBlocks, int16_t Flags);
+ bool NameInUse(const BackupStoreFilename &rName);
+
+ // For testing
+ // Don't use these functions in normal code!
+ void TESTONLY_SetObjectID(int64_t ObjectID) {mObjectID = ObjectID;}
+ // Debug and diagnostics
+ void Dump(void *clibFileHandle, bool ToTrace); // first arg is FILE *, but avoid including stdio.h everywhere
+
+private:
+ int64_t mRevisionID;
+ int64_t mObjectID;
+ int64_t mContainerID;
+ std::vector<Entry*> mEntries;
+ box_time_t mAttributesModTime;
+ StreamableMemBlock mAttributes;
+ int64_t mUserInfo1;
+};
+
+#endif // BACKUPSTOREDIRECTORY__H
+
diff --git a/lib/backupstore/BackupStoreException.h b/lib/backupstore/BackupStoreException.h
new file mode 100644
index 00000000..981dfa60
--- /dev/null
+++ b/lib/backupstore/BackupStoreException.h
@@ -0,0 +1,17 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreException.h
+// Purpose: Exception
+// Created: 2003/07/08
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREEXCEPTION__H
+#define BACKUPSTOREEXCEPTION__H
+
+// Compatibility
+#include "autogen_BackupStoreException.h"
+
+#endif // BACKUPSTOREEXCEPTION__H
+
diff --git a/lib/backupstore/BackupStoreException.txt b/lib/backupstore/BackupStoreException.txt
new file mode 100644
index 00000000..ece772c0
--- /dev/null
+++ b/lib/backupstore/BackupStoreException.txt
@@ -0,0 +1,72 @@
+EXCEPTION BackupStore 4
+
+Internal 0
+BadAccountDatabaseFile 1
+AccountDatabaseNoSuchEntry 2
+InvalidBackupStoreFilename 3
+UnknownFilenameEncoding 4
+CouldntReadEntireStructureFromStream 5
+BadDirectoryFormat 6
+CouldNotFindEntryInDirectory 7
+OutputFileAlreadyExists 8
+OSFileError 9
+StreamDoesntHaveRequiredFeatures 10
+BadBackupStoreFile 11
+CouldNotLoadStoreInfo 12
+BadStoreInfoOnLoad 13
+StoreInfoIsReadOnly 14
+StoreInfoDirNotInList 15
+StoreInfoBlockDeltaMakesValueNegative 16
+DirectoryHasBeenDeleted 17
+StoreInfoNotInitialised 18
+StoreInfoAlreadyLoaded 19
+StoreInfoNotLoaded 20
+ReadFileFromStreamTimedOut 21
+FileWrongSizeAfterBeingStored 22
+AddedFileDoesNotVerify 23
+StoreInfoForWrongAccount 24
+ContextIsReadOnly 25
+AttributesNotLoaded 26
+AttributesNotUnderstood 27
+WrongServerVersion 28 # client side
+ClientMarkerNotAsExpected 29 Another process logged into the store and modified it while this process was running. Check you're not running two or more clients on the same account.
+NameAlreadyExistsInDirectory 30
+BerkelyDBFailure 31 # client side
+InodeMapIsReadOnly 32 # client side
+InodeMapNotOpen 33 # client side
+FilenameEncryptionKeyNotKnown 34
+FilenameEncryptionNoKeyForSpecifiedMethod 35
+FilenameEncryptionNotSetup 36
+CouldntLoadClientKeyMaterial 37
+BadEncryptedAttributes 38
+EncryptedAttributesHaveUnknownEncoding 39
+OutputSizeTooSmallForChunk 40
+BadEncodedChunk 41
+NotEnoughSpaceToDecodeChunk 42
+ChunkHasUnknownEncoding 43
+ChunkContainsBadCompressedData 44
+CantWriteToEncodedFileStream 45
+Temp_FileEncodeStreamDidntReadBuffer 46
+CantWriteToDecodedFileStream 47
+WhenDecodingExpectedToReadButCouldnt 48
+BackupStoreFileFailedIntegrityCheck 49
+ThereIsNoDataInASymLink 50
+IVLengthForEncodedBlockSizeDoesntMeetLengthRequirements 51
+BlockEntryEncodingDidntGiveExpectedLength 52
+CouldNotFindUnusedIDDuringAllocation 53
+AddedFileExceedsStorageLimit 54
+CannotDiffAnIncompleteStoreFile 55
+CannotDecodeDiffedFilesWithoutCombining 56
+FailedToReadBlockOnCombine 57
+OnCombineFromFileIsIncomplete 58
+BadNotifySysadminEventCode 59
+InternalAlgorithmErrorCheckIDNotMonotonicallyIncreasing 60
+CouldNotLockStoreAccount 61 Another process is accessing this account -- is a client connected to the server?
+AttributeHashSecretNotSet 62
+AEScipherNotSupportedByInstalledOpenSSL 63 The system needs to be compiled with support for OpenSSL 0.9.7 or later to be able to decode files encrypted with AES
+SignalReceived 64 A signal was received by the process, restart or terminate needed. Exception thrown to abort connection.
+IncompatibleFromAndDiffFiles 65 Attempt to use a diff and a from file together, when they're not related
+DiffFromIDNotFoundInDirectory 66 When uploading via a diff, the diff from file must be in the same directory
+PatchChainInfoBadInDirectory 67 A directory contains inconsistent information. Run bbstoreaccounts check to fix it.
+UnknownObjectRefCountRequested 68 A reference count was requested for an object whose reference count is not known.
+MultiplyReferencedObject 69 Attempted to modify an object with multiple references, should be uncloned first
diff --git a/lib/backupstore/BackupStoreFile.cpp b/lib/backupstore/BackupStoreFile.cpp
new file mode 100644
index 00000000..519305ff
--- /dev/null
+++ b/lib/backupstore/BackupStoreFile.cpp
@@ -0,0 +1,1558 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFile.cpp
+// Purpose: Utils for manipulating files
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#ifdef HAVE_UNISTD_H
+ #include <unistd.h>
+#endif
+
+#include <sys/stat.h>
+#include <string.h>
+#include <new>
+#include <string.h>
+
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ #include <stdio.h>
+#endif
+
+#include "BackupStoreFile.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreFileCryptVar.h"
+#include "BackupStoreFilename.h"
+#include "BackupStoreException.h"
+#include "IOStream.h"
+#include "Guards.h"
+#include "FileModificationTime.h"
+#include "FileStream.h"
+#include "BackupClientFileAttributes.h"
+#include "BackupStoreObjectMagic.h"
+#include "Compress.h"
+#include "CipherContext.h"
+#include "CipherBlowfish.h"
+#include "CipherAES.h"
+#include "BackupStoreConstants.h"
+#include "CollectInBufferStream.h"
+#include "RollingChecksum.h"
+#include "MD5Digest.h"
+#include "ReadGatherStream.h"
+#include "Random.h"
+#include "BackupStoreFileEncodeStream.h"
+#include "Logging.h"
+
+#include "MemLeakFindOn.h"
+
+using namespace BackupStoreFileCryptVar;
+
+// How big a buffer to use for copying files
+#define COPY_BUFFER_SIZE (8*1024)
+
+// Statistics
+BackupStoreFileStats BackupStoreFile::msStats = {0,0,0};
+
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ bool sWarnedAboutBackwardsCompatiblity = false;
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFile::EncodeFile(const std::string &, int64_t, const BackupStoreFilename &, ...)
+//		Purpose: Encode a file into something for storing on the file server.
+// Requires a real filename so full info can be stored.
+//
+// Returns a stream. Most of the work is done by the stream
+// when data is actually requested -- the file will be held
+// open until the stream is deleted or the file finished.
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupStoreFileEncodeStream> BackupStoreFile::EncodeFile(
+ const std::string& Filename, int64_t ContainerID,
+ const BackupStoreFilename &rStoreFilename,
+ int64_t *pModificationTime,
+ ReadLoggingStream::Logger* pLogger,
+ RunStatusProvider* pRunStatusProvider)
+{
+ // Create the stream
+ std::auto_ptr<BackupStoreFileEncodeStream> stream(
+ new BackupStoreFileEncodeStream);
+
+ // Do the initial setup
+ stream->Setup(Filename, 0 /* no recipe, just encode */, ContainerID,
+ rStoreFilename, pModificationTime, pLogger, pRunStatusProvider);
+
+ // Return the stream for the caller
+ return stream;
+}
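+
+// Usage sketch (illustrative only; the local filename, container ID and store
+// filename are assumed caller-side values, and the trailing NULLs skip the
+// optional logging and status-provider hooks). The returned stream does the
+// real work lazily as it is read, so it can be copied straight to any
+// writable IOStream:
+//
+//     int64_t modTime = 0;
+//     std::auto_ptr<BackupStoreFileEncodeStream> encoded(
+//         BackupStoreFile::EncodeFile("/path/to/local/file", containerID,
+//             storeFilename, &modTime, NULL, NULL));
+//     encoded->CopyStreamTo(output);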
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::VerifyEncodedFileFormat(IOStream &)
+// Purpose: Verify that an encoded file meets the format
+// requirements. Doesn't verify that the data is intact
+// and can be decoded. Optionally returns the ID of the
+// file which it is diffed from, and the (original)
+// container ID.
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFile::VerifyEncodedFileFormat(IOStream &rFile, int64_t *pDiffFromObjectIDOut, int64_t *pContainerIDOut)
+{
+ // Get the size of the file
+ int64_t fileSize = rFile.BytesLeftToRead();
+ if(fileSize == IOStream::SizeOfStreamUnknown)
+ {
+ THROW_EXCEPTION(BackupStoreException, StreamDoesntHaveRequiredFeatures)
+ }
+
+ // Get the header...
+ file_StreamFormat hdr;
+ if(!rFile.ReadFullBuffer(&hdr, sizeof(hdr), 0 /* not interested in bytes read if this fails */))
+ {
+ // Couldn't read header
+ return false;
+ }
+
+ // Check magic number
+ if(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ && ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V0
+#endif
+ )
+ {
+ return false;
+ }
+
+ // Get a filename, see if it loads OK
+ try
+ {
+ BackupStoreFilename fn;
+ fn.ReadFromStream(rFile, IOStream::TimeOutInfinite);
+ }
+ catch(...)
+ {
+ // an error occured while reading it, so that's not good
+ return false;
+ }
+
+ // Skip the attributes -- because they're encrypted, the server can't tell whether they're OK or not
+ try
+ {
+ int32_t size_s;
+ if(!rFile.ReadFullBuffer(&size_s, sizeof(size_s), 0 /* not interested in bytes read if this fails */))
+ {
+ THROW_EXCEPTION(CommonException, StreamableMemBlockIncompleteRead)
+ }
+ int size = ntohl(size_s);
+ // Skip forward the size
+ rFile.Seek(size, IOStream::SeekType_Relative);
+ }
+ catch(...)
+ {
+ // an error occured while reading it, so that's not good
+ return false;
+ }
+
+ // Get current position in file -- the end of the header
+ int64_t headerEnd = rFile.GetPosition();
+
+ // Get number of blocks
+ int64_t numBlocks = box_ntoh64(hdr.mNumBlocks);
+
+ // Calculate where the block index will be, check it's reasonable
+ int64_t blockIndexLoc = fileSize - ((numBlocks * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader));
+ if(blockIndexLoc < headerEnd)
+ {
+ // Not enough space left for the block index, let alone the blocks themselves
+ return false;
+ }
+
+ // Load the block index header
+ rFile.Seek(blockIndexLoc, IOStream::SeekType_Absolute);
+ file_BlockIndexHeader blkhdr;
+ if(!rFile.ReadFullBuffer(&blkhdr, sizeof(blkhdr), 0 /* not interested in bytes read if this fails */))
+ {
+ // Couldn't read block index header -- assume bad file
+ return false;
+ }
+
+ // Check header
+ if((ntohl(blkhdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ && ntohl(blkhdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V0
+#endif
+ )
+ || (int64_t)box_ntoh64(blkhdr.mNumBlocks) != numBlocks)
+ {
+ // Bad header -- either magic value or number of blocks is wrong
+ return false;
+ }
+
+ // Flag for recording whether a block is referenced from another file
+ bool blockFromOtherFileReferenced = false;
+
+ // Read the index, checking that the length values all make sense
+ int64_t currentBlockStart = headerEnd;
+ for(int64_t b = 0; b < numBlocks; ++b)
+ {
+ // Read block entry
+ file_BlockIndexEntry blk;
+ if(!rFile.ReadFullBuffer(&blk, sizeof(blk), 0 /* not interested in bytes read if this fails */))
+ {
+ // Couldn't read block index entry -- assume bad file
+ return false;
+ }
+
+ // Check size and location
+ int64_t blkSize = box_ntoh64(blk.mEncodedSize);
+ if(blkSize <= 0)
+ {
+ // Mark that this file references another file
+ blockFromOtherFileReferenced = true;
+ }
+ else
+ {
+ // This block is actually in this file
+ if((currentBlockStart + blkSize) > blockIndexLoc)
+ {
+ // Encoded size makes the block run over the index
+ return false;
+ }
+
+				// Move the current block start to the end of this block
+ currentBlockStart += blkSize;
+ }
+ }
+
+ // Check that there's no empty space
+ if(currentBlockStart != blockIndexLoc)
+ {
+ return false;
+ }
+
+	// Check that if a block in another file is referenced, then the other file's ID is present, and that if none is referenced, no ID is set.
+ int64_t otherID = box_ntoh64(blkhdr.mOtherFileID);
+ if((otherID != 0 && blockFromOtherFileReferenced == false)
+ || (otherID == 0 && blockFromOtherFileReferenced == true))
+ {
+ // Doesn't look good!
+ return false;
+ }
+
+ // Does the caller want the other ID?
+ if(pDiffFromObjectIDOut)
+ {
+ *pDiffFromObjectIDOut = otherID;
+ }
+
+ // Does the caller want the container ID?
+ if(pContainerIDOut)
+ {
+ *pContainerIDOut = box_ntoh64(hdr.mContainerID);
+ }
+
+ // Passes all tests
+ return true;
+}
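+
+// Encoded file layout as checked above (informal sketch; all integer fields
+// are in network byte order):
+//
+//     file_StreamFormat header
+//     BackupStoreFilename             store filename
+//     StreamableMemBlock              encrypted attributes (size prefix + data)
+//     encoded blocks                  (only those with mEncodedSize > 0)
+//     file_BlockIndexHeader
+//     mNumBlocks x file_BlockIndexEntry
+//
+// A non-positive mEncodedSize marks a block held in the file identified by
+// mOtherFileID, i.e. this object is a diff and must be combined with the
+// older version before it can be decoded.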
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFile::DecodeFile(IOStream &, const char *, int, const BackupClientFileAttributes *)
+// Purpose: Decode a file. Will set file attributes. File must not exist.
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::DecodeFile(IOStream &rEncodedFile, const char *DecodedFilename, int Timeout, const BackupClientFileAttributes *pAlterativeAttr)
+{
+ // Does file exist?
+ EMU_STRUCT_STAT st;
+ if(EMU_STAT(DecodedFilename, &st) == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, OutputFileAlreadyExists)
+ }
+
+ // Try, delete output file if error
+ try
+ {
+ // Make a stream for outputting this file
+ FileStream out(DecodedFilename, O_WRONLY | O_CREAT | O_EXCL);
+
+ // Get the decoding stream
+ std::auto_ptr<DecodedStream> stream(DecodeFileStream(rEncodedFile, Timeout, pAlterativeAttr));
+
+ // Is it a symlink?
+ if(!stream->IsSymLink())
+ {
+ // Copy it out to the file
+ stream->CopyStreamTo(out);
+ }
+
+ out.Close();
+
+ // The stream might have uncertain size, in which case
+ // we need to drain it to get the
+ // Protocol::ProtocolStreamHeader_EndOfStream byte
+ // out of our connection stream.
+ char buffer[1];
+ int drained = rEncodedFile.Read(buffer, 1);
+
+ // The Read will return 0 if we are actually at the end
+ // of the stream, but some tests decode files directly,
+ // in which case we are actually positioned at the start
+ // of the block index. I hope that reading an extra byte
+ // doesn't hurt!
+ // ASSERT(drained == 0);
+
+ // Write the attributes
+ try
+ {
+ stream->GetAttributes().WriteAttributes(DecodedFilename);
+ }
+ catch (std::exception& e)
+ {
+ BOX_WARNING("Failed to restore attributes on " <<
+ DecodedFilename << ": " << e.what());
+ }
+ }
+ catch(...)
+ {
+ ::unlink(DecodedFilename);
+ throw;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodeFileStream(IOStream &, int, const BackupClientFileAttributes *)
+// Purpose: Return a stream which will decode the encrypted file data on the fly.
+// Accepts streams in block index first, or main header first, order. In the latter case,
+// the stream must be Seek()able.
+//
+// Before you use the returned stream, call IsSymLink() -- symlink streams won't allow
+// you to read any data to enforce correct logic. See BackupStoreFile::DecodeFile() implementation.
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupStoreFile::DecodedStream> BackupStoreFile::DecodeFileStream(IOStream &rEncodedFile, int Timeout, const BackupClientFileAttributes *pAlterativeAttr)
+{
+ // Create stream
+ std::auto_ptr<DecodedStream> stream(new DecodedStream(rEncodedFile, Timeout));
+
+ // Get it ready
+ stream->Setup(pAlterativeAttr);
+
+ // Return to caller
+ return stream;
+}
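+
+// Usage sketch (illustrative only; "encoded", "timeout" and "out" are assumed
+// caller-side values): check IsSymLink() before reading any data, as
+// DecodeFile() above does, then copy the decoded contents out:
+//
+//     std::auto_ptr<BackupStoreFile::DecodedStream> decoded(
+//         BackupStoreFile::DecodeFileStream(encoded, timeout, NULL));
+//     if(!decoded->IsSymLink())
+//     {
+//         decoded->CopyStreamTo(out);
+//     }
+//     // attributes are available via decoded->GetAttributes()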
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::DecodedStream(IOStream &, int)
+// Purpose: Constructor
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+BackupStoreFile::DecodedStream::DecodedStream(IOStream &rEncodedFile, int Timeout)
+ : mrEncodedFile(rEncodedFile),
+ mTimeout(Timeout),
+ mNumBlocks(0),
+ mpBlockIndex(0),
+ mpEncodedData(0),
+ mpClearData(0),
+ mClearDataSize(0),
+ mCurrentBlock(-1),
+ mCurrentBlockClearSize(0),
+ mPositionInCurrentBlock(0),
+ mEntryIVBase(42) // different to default value in the encoded stream!
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ , mIsOldVersion(false)
+#endif
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::~DecodedStream()
+//		Purpose: Destructor
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+BackupStoreFile::DecodedStream::~DecodedStream()
+{
+ // Free any allocated memory
+ if(mpBlockIndex)
+ {
+ ::free(mpBlockIndex);
+ }
+ if(mpEncodedData)
+ {
+ BackupStoreFile::CodingChunkFree(mpEncodedData);
+ }
+ if(mpClearData)
+ {
+ ::free(mpClearData);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::Setup(const BackupClientFileAttributes *)
+// Purpose: Get the stream ready to decode -- reads in headers
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::DecodedStream::Setup(const BackupClientFileAttributes *pAlterativeAttr)
+{
+ // Get the size of the file
+ int64_t fileSize = mrEncodedFile.BytesLeftToRead();
+
+ // Get the magic number to work out which order the stream is in
+ int32_t magic;
+ if(!mrEncodedFile.ReadFullBuffer(&magic, sizeof(magic), 0 /* not interested in bytes read if this fails */, mTimeout))
+ {
+ // Couldn't read magic value
+ THROW_EXCEPTION(BackupStoreException, WhenDecodingExpectedToReadButCouldnt)
+ }
+
+ bool inFileOrder = true;
+ switch(ntohl(magic))
+ {
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ case OBJECTMAGIC_FILE_MAGIC_VALUE_V0:
+ mIsOldVersion = true;
+ // control flows on
+#endif
+ case OBJECTMAGIC_FILE_MAGIC_VALUE_V1:
+ inFileOrder = true;
+ break;
+
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ case OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V0:
+ mIsOldVersion = true;
+ // control flows on
+#endif
+ case OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1:
+ inFileOrder = false;
+ break;
+
+ default:
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // If not in file order, then the index list must be read now
+ if(!inFileOrder)
+ {
+ ReadBlockIndex(true /* have already read and verified the magic number */);
+ }
+
+ // Get header
+ file_StreamFormat hdr;
+ if(inFileOrder)
+ {
+ // Read the header, without the magic number
+ if(!mrEncodedFile.ReadFullBuffer(((uint8_t*)&hdr) + sizeof(magic), sizeof(hdr) - sizeof(magic),
+ 0 /* not interested in bytes read if this fails */, mTimeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, WhenDecodingExpectedToReadButCouldnt)
+ }
+ // Put in magic number
+ hdr.mMagicValue = magic;
+ }
+ else
+ {
+ // Not in file order, so need to read the full header
+ if(!mrEncodedFile.ReadFullBuffer(&hdr, sizeof(hdr), 0 /* not interested in bytes read if this fails */, mTimeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, WhenDecodingExpectedToReadButCouldnt)
+ }
+ }
+
+ // Check magic number
+ if(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ && ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V0
+#endif
+ )
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Get the filename
+ mFilename.ReadFromStream(mrEncodedFile, mTimeout);
+
+ // Get the attributes (either from stream, or supplied attributes)
+ if(pAlterativeAttr != 0)
+ {
+ // Read dummy attributes
+ BackupClientFileAttributes attr;
+ attr.ReadFromStream(mrEncodedFile, mTimeout);
+
+ // Set to supplied attributes
+ mAttributes = *pAlterativeAttr;
+ }
+ else
+ {
+ // Read the attributes from the stream
+ mAttributes.ReadFromStream(mrEncodedFile, mTimeout);
+ }
+
+ // If it is in file order, go and read the file attributes
+ // Requires that the stream can seek
+ if(inFileOrder)
+ {
+ // Make sure the file size is known
+ if(fileSize == IOStream::SizeOfStreamUnknown)
+ {
+ THROW_EXCEPTION(BackupStoreException, StreamDoesntHaveRequiredFeatures)
+ }
+
+ // Store current location (beginning of encoded blocks)
+ int64_t endOfHeaderPos = mrEncodedFile.GetPosition();
+
+ // Work out where the index is
+ int64_t numBlocks = box_ntoh64(hdr.mNumBlocks);
+ int64_t blockHeaderPos = fileSize - ((numBlocks * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader));
+
+ // Seek to that position
+ mrEncodedFile.Seek(blockHeaderPos, IOStream::SeekType_Absolute);
+
+ // Read the block index
+ ReadBlockIndex(false /* magic number still to be read */);
+
+ // Seek back to the end of header position, ready for reading the chunks
+ mrEncodedFile.Seek(endOfHeaderPos, IOStream::SeekType_Absolute);
+ }
+
+ // Check view of blocks from block header and file header match
+ if(mNumBlocks != (int64_t)box_ntoh64(hdr.mNumBlocks))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Need to allocate some memory for the two blocks for reading encoded data, and clear data
+ if(mNumBlocks > 0)
+ {
+ // Find the maximum encoded data size
+ int32_t maxEncodedDataSize = 0;
+ const file_BlockIndexEntry *entry = (file_BlockIndexEntry *)mpBlockIndex;
+ ASSERT(entry != 0);
+ for(int64_t e = 0; e < mNumBlocks; e++)
+ {
+ // Get the clear and encoded size
+ int32_t encodedSize = box_ntoh64(entry[e].mEncodedSize);
+ ASSERT(encodedSize > 0);
+
+ // Larger?
+ if(encodedSize > maxEncodedDataSize) maxEncodedDataSize = encodedSize;
+ }
+
+ // Allocate those blocks!
+ mpEncodedData = (uint8_t*)BackupStoreFile::CodingChunkAlloc(maxEncodedDataSize + 32);
+
+ // Allocate the block for the clear data, using the hint from the header.
+		// If this is wrong, an exception will be thrown cleanly later on, so it can't be used
+ // to do anything more than cause an error on downloading.
+ mClearDataSize = OutputBufferSizeForKnownOutputSize(ntohl(hdr.mMaxBlockClearSize)) + 32;
+ mpClearData = (uint8_t*)::malloc(mClearDataSize);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::ReadBlockIndex(bool)
+// Purpose: Read the block index from the stream, and store in internal buffer (minus header)
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::DecodedStream::ReadBlockIndex(bool MagicAlreadyRead)
+{
+ // Header
+ file_BlockIndexHeader blkhdr;
+
+	// Read it in -- the way depends on whether the magic number has already been read
+ if(MagicAlreadyRead)
+ {
+ // Read the header, without the magic number
+ if(!mrEncodedFile.ReadFullBuffer(((uint8_t*)&blkhdr) + sizeof(blkhdr.mMagicValue), sizeof(blkhdr) - sizeof(blkhdr.mMagicValue),
+ 0 /* not interested in bytes read if this fails */, mTimeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, WhenDecodingExpectedToReadButCouldnt)
+ }
+ }
+ else
+ {
+ // Magic not already read, so need to read the full header
+ if(!mrEncodedFile.ReadFullBuffer(&blkhdr, sizeof(blkhdr), 0 /* not interested in bytes read if this fails */, mTimeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, WhenDecodingExpectedToReadButCouldnt)
+ }
+
+ // Check magic value
+ if(ntohl(blkhdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ && ntohl(blkhdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V0
+#endif
+ )
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ }
+
+ // Get the number of blocks out of the header
+ mNumBlocks = box_ntoh64(blkhdr.mNumBlocks);
+
+ // Read the IV base
+ mEntryIVBase = box_ntoh64(blkhdr.mEntryIVBase);
+
+ // Load the block entries in?
+ if(mNumBlocks > 0)
+ {
+ // How big is the index?
+ int64_t indexSize = sizeof(file_BlockIndexEntry) * mNumBlocks;
+
+ // Allocate some memory
+ mpBlockIndex = ::malloc(indexSize);
+ if(mpBlockIndex == 0)
+ {
+ throw std::bad_alloc();
+ }
+
+ // Read it in
+ if(!mrEncodedFile.ReadFullBuffer(mpBlockIndex, indexSize, 0 /* not interested in bytes read if this fails */, mTimeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, WhenDecodingExpectedToReadButCouldnt)
+ }
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::Read(void *, int, int)
+//		Purpose: As interface. Reads decrypted data.
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+int BackupStoreFile::DecodedStream::Read(void *pBuffer, int NBytes, int Timeout)
+{
+ // Symlinks don't have data. So can't read it. Not even zero bytes.
+ if(IsSymLink())
+ {
+ // Don't allow reading in this case
+ THROW_EXCEPTION(BackupStoreException, ThereIsNoDataInASymLink);
+ }
+
+ // Already finished?
+ if(mCurrentBlock >= mNumBlocks)
+ {
+ // At end of stream, nothing to do
+ return 0;
+ }
+
+ int bytesToRead = NBytes;
+ uint8_t *output = (uint8_t*)pBuffer;
+
+ while(bytesToRead > 0 && mCurrentBlock < mNumBlocks)
+ {
+ // Anything left in the current block?
+ if(mPositionInCurrentBlock < mCurrentBlockClearSize)
+ {
+ // Copy data out of this buffer
+ int s = mCurrentBlockClearSize - mPositionInCurrentBlock;
+ if(s > bytesToRead) s = bytesToRead; // limit to requested data
+
+ // Copy
+ ::memcpy(output, mpClearData + mPositionInCurrentBlock, s);
+
+ // Update positions
+ output += s;
+ mPositionInCurrentBlock += s;
+ bytesToRead -= s;
+ }
+
+ // Need to get some more data?
+ if(bytesToRead > 0 && mPositionInCurrentBlock >= mCurrentBlockClearSize)
+ {
+ // Number of next block
+ ++mCurrentBlock;
+ if(mCurrentBlock >= mNumBlocks)
+ {
+ // Stop now!
+ break;
+ }
+
+ // Get the size from the block index
+ const file_BlockIndexEntry *entry = (file_BlockIndexEntry *)mpBlockIndex;
+ int32_t encodedSize = box_ntoh64(entry[mCurrentBlock].mEncodedSize);
+ if(encodedSize <= 0)
+ {
+ // The caller is attempting to decode a file which is the direct result of a diff
+ // operation, and so does not contain all the data.
+ // It needs to be combined with the previous version first.
+ THROW_EXCEPTION(BackupStoreException, CannotDecodeDiffedFilesWithoutCombining)
+ }
+
+ // Load in next block
+ if(!mrEncodedFile.ReadFullBuffer(mpEncodedData, encodedSize, 0 /* not interested in bytes read if this fails */, mTimeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, WhenDecodingExpectedToReadButCouldnt)
+ }
+
+ // Decode the data
+ mCurrentBlockClearSize = BackupStoreFile::DecodeChunk(mpEncodedData, encodedSize, mpClearData, mClearDataSize);
+
+ // Calculate IV for this entry
+ uint64_t iv = mEntryIVBase;
+ iv += mCurrentBlock;
+ // Convert to network byte order before encrypting with it, so that restores work on
+ // platforms with different endiannesses.
+ iv = box_hton64(iv);
+ sBlowfishDecryptBlockEntry.SetIV(&iv);
+
+ // Decrypt the encrypted section
+ file_BlockIndexEntryEnc entryEnc;
+ int sectionSize = sBlowfishDecryptBlockEntry.TransformBlock(&entryEnc, sizeof(entryEnc),
+ entry[mCurrentBlock].mEnEnc, sizeof(entry[mCurrentBlock].mEnEnc));
+ if(sectionSize != sizeof(entryEnc))
+ {
+ THROW_EXCEPTION(BackupStoreException, BlockEntryEncodingDidntGiveExpectedLength)
+ }
+
+ // Make sure this is the right size
+ if(mCurrentBlockClearSize != (int32_t)ntohl(entryEnc.mSize))
+ {
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ if(!mIsOldVersion)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+				// Versions 0.05 and previous of Box Backup didn't properly handle endianness of the
+ // IV for the encrypted section. Try again, with the thing the other way round
+ iv = box_swap64(iv);
+ sBlowfishDecryptBlockEntry.SetIV(&iv);
+ int sectionSize = sBlowfishDecryptBlockEntry.TransformBlock(&entryEnc, sizeof(entryEnc),
+ entry[mCurrentBlock].mEnEnc, sizeof(entry[mCurrentBlock].mEnEnc));
+ if(sectionSize != sizeof(entryEnc))
+ {
+ THROW_EXCEPTION(BackupStoreException, BlockEntryEncodingDidntGiveExpectedLength)
+ }
+ if(mCurrentBlockClearSize != (int32_t)ntohl(entryEnc.mSize))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ else
+ {
+ // Warn and log this issue
+ if(!sWarnedAboutBackwardsCompatiblity)
+ {
+ BOX_WARNING("WARNING: Decoded one or more files using backwards compatibility mode for block index.");
+ sWarnedAboutBackwardsCompatiblity = true;
+ }
+ }
+#else
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+#endif
+ }
+
+ // Check the digest
+ MD5Digest md5;
+ md5.Add(mpClearData, mCurrentBlockClearSize);
+ md5.Finish();
+ if(!md5.DigestMatches((uint8_t*)entryEnc.mStrongChecksum))
+ {
+ THROW_EXCEPTION(BackupStoreException, BackupStoreFileFailedIntegrityCheck)
+ }
+
+ // Set vars to say what's happening
+ mPositionInCurrentBlock = 0;
+ }
+ }
+
+ ASSERT(bytesToRead >= 0);
+ ASSERT(bytesToRead <= NBytes);
+
+ return NBytes - bytesToRead;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::IsSymLink()
+// Purpose: Is the unencoded file actually a symlink?
+// Created: 10/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFile::DecodedStream::IsSymLink()
+{
+ // First, check in with the attributes
+ if(!mAttributes.IsSymLink())
+ {
+ return false;
+ }
+
+ // So the attributes think it is a symlink.
+ // Consistency check...
+ if(mNumBlocks != 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ return true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::Write(const void *, int)
+// Purpose: As interface. Throws exception, as you can't write to this stream.
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::DecodedStream::Write(const void *pBuffer, int NBytes)
+{
+ THROW_EXCEPTION(BackupStoreException, CantWriteToDecodedFileStream)
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::StreamDataLeft()
+// Purpose: As interface. Any data left?
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFile::DecodedStream::StreamDataLeft()
+{
+ return mCurrentBlock < mNumBlocks;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodedStream::StreamClosed()
+// Purpose: As interface. Always returns true, no writing allowed.
+// Created: 9/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFile::DecodedStream::StreamClosed()
+{
+ // Can't write to this stream!
+ return true;
+}
+
+
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFile::SetBlowfishKeys(const void *, int, const void *, int)
+//		Purpose: Static. Sets the keys to use for encrypting and decrypting file data
+//			 and block index entries.
+// Created: 7/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::SetBlowfishKeys(const void *pKey, int KeyLength, const void *pBlockEntryKey, int BlockEntryKeyLength)
+{
+ // IVs set later
+ sBlowfishEncrypt.Reset();
+ sBlowfishEncrypt.Init(CipherContext::Encrypt, CipherBlowfish(CipherDescription::Mode_CBC, pKey, KeyLength));
+ sBlowfishDecrypt.Reset();
+ sBlowfishDecrypt.Init(CipherContext::Decrypt, CipherBlowfish(CipherDescription::Mode_CBC, pKey, KeyLength));
+
+ sBlowfishEncryptBlockEntry.Reset();
+ sBlowfishEncryptBlockEntry.Init(CipherContext::Encrypt, CipherBlowfish(CipherDescription::Mode_CBC, pBlockEntryKey, BlockEntryKeyLength));
+ sBlowfishEncryptBlockEntry.UsePadding(false);
+ sBlowfishDecryptBlockEntry.Reset();
+ sBlowfishDecryptBlockEntry.Init(CipherContext::Decrypt, CipherBlowfish(CipherDescription::Mode_CBC, pBlockEntryKey, BlockEntryKeyLength));
+ sBlowfishDecryptBlockEntry.UsePadding(false);
+}
+
+
+#ifndef HAVE_OLD_SSL
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::SetAESKey(const void *, int)
+// Purpose: Sets the AES key to use for file data encryption. Will select AES as
+// the cipher to use when encrypting.
+// Created: 27/4/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::SetAESKey(const void *pKey, int KeyLength)
+{
+ // Setup context
+ sAESEncrypt.Reset();
+ sAESEncrypt.Init(CipherContext::Encrypt, CipherAES(CipherDescription::Mode_CBC, pKey, KeyLength));
+ sAESDecrypt.Reset();
+ sAESDecrypt.Init(CipherContext::Decrypt, CipherAES(CipherDescription::Mode_CBC, pKey, KeyLength));
+
+ // Set encryption to use this key, instead of the "default" blowfish key
+ spEncrypt = &sAESEncrypt;
+ sEncryptCipherType = HEADER_AES_ENCODING;
+}
+#endif
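+
+// Illustrative key setup (a sketch -- the key buffers, lengths and values below are
+// hypothetical; real key material is supplied by the caller, typically loaded from the
+// client's keys file). SetBlowfishKeys() is always required, since block index entries
+// are always Blowfish encrypted; SetAESKey() may then be called to switch file data
+// encryption over to AES:
+//
+//	unsigned char fileKey[56], blockEntryKey[56];	// hypothetical key buffers
+//	BackupStoreFile::SetBlowfishKeys(fileKey, sizeof(fileKey),
+//		blockEntryKey, sizeof(blockEntryKey));
+//	#ifndef HAVE_OLD_SSL
+//	unsigned char aesKey[32];			// hypothetical key buffer
+//	BackupStoreFile::SetAESKey(aesKey, sizeof(aesKey));
+//	#endif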
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::MaxBlockSizeForChunkSize(int)
+// Purpose: The maximum output size of a block, given the chunk size
+// Created: 7/12/03
+//
+// --------------------------------------------------------------------------
+int BackupStoreFile::MaxBlockSizeForChunkSize(int ChunkSize)
+{
+	// Calculate the maximum size of the output: start with the largest the chunk could be
+	// after compression, which is then encrypted and has a 1 byte header and the IV added,
+	// plus 1 byte for luck. Then add 128 bytes on top just to make sure. (A belt and braces
+	// approach to fixing a problem where a rather incompressible file didn't fit in a block buffer.)
+ return sBlowfishEncrypt.MaxOutSizeForInBufferSize(Compress_MaxSizeForCompressedData(ChunkSize)) + 1 + 1
+ + sBlowfishEncrypt.GetIVLength() + 128;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::EncodeChunk(const void *, int, BackupStoreFile::EncodingBuffer &)
+//		Purpose: Encodes a chunk (encryption, possibly compressed beforehand)
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+int BackupStoreFile::EncodeChunk(const void *Chunk, int ChunkSize, BackupStoreFile::EncodingBuffer &rOutput)
+{
+ ASSERT(spEncrypt != 0);
+
+ // Check there's some space in the output block
+ if(rOutput.mBufferSize < 256)
+ {
+ rOutput.Reallocate(256);
+ }
+
+ // Check alignment of the block
+ ASSERT((((uint32_t)(long)rOutput.mpBuffer) % BACKUPSTOREFILE_CODING_BLOCKSIZE) == BACKUPSTOREFILE_CODING_OFFSET);
+
+ // Want to compress it?
+ bool compressChunk = (ChunkSize >= BACKUP_FILE_MIN_COMPRESSED_CHUNK_SIZE);
+
+ // Build header
+ uint8_t header = sEncryptCipherType << HEADER_ENCODING_SHIFT;
+ if(compressChunk) header |= HEADER_CHUNK_IS_COMPRESSED;
+
+ // Store header
+ rOutput.mpBuffer[0] = header;
+ int outOffset = 1;
+
+ // Setup cipher, and store the IV
+ int ivLen = 0;
+ const void *iv = spEncrypt->SetRandomIV(ivLen);
+ ::memcpy(rOutput.mpBuffer + outOffset, iv, ivLen);
+ outOffset += ivLen;
+
+ // Start encryption process
+ spEncrypt->Begin();
+
+ #define ENCODECHUNK_CHECK_SPACE(ToEncryptSize) \
+ { \
+ if((rOutput.mBufferSize - outOffset) < ((ToEncryptSize) + 128)) \
+ { \
+ rOutput.Reallocate(rOutput.mBufferSize + (ToEncryptSize) + 128); \
+ } \
+ }
+
+ // Encode the chunk
+ if(compressChunk)
+ {
+ // buffer to compress into
+ uint8_t buffer[2048];
+
+ // Set compressor with all the chunk as an input
+ Compress<true> compress;
+ compress.Input(Chunk, ChunkSize);
+ compress.FinishInput();
+
+ // Get and encrypt output
+ while(!compress.OutputHasFinished())
+ {
+ int s = compress.Output(buffer, sizeof(buffer));
+ if(s > 0)
+ {
+ ENCODECHUNK_CHECK_SPACE(s)
+ outOffset += spEncrypt->Transform(rOutput.mpBuffer + outOffset, rOutput.mBufferSize - outOffset, buffer, s);
+ }
+ else
+ {
+				// Should never happen, as we supplied all the input in one go.
+				// So if it does, there's a logic problem somewhere.
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+ }
+ ENCODECHUNK_CHECK_SPACE(16)
+ outOffset += spEncrypt->Final(rOutput.mpBuffer + outOffset, rOutput.mBufferSize - outOffset);
+ }
+ else
+ {
+ // Straight encryption
+ ENCODECHUNK_CHECK_SPACE(ChunkSize)
+ outOffset += spEncrypt->Transform(rOutput.mpBuffer + outOffset, rOutput.mBufferSize - outOffset, Chunk, ChunkSize);
+ ENCODECHUNK_CHECK_SPACE(16)
+ outOffset += spEncrypt->Final(rOutput.mpBuffer + outOffset, rOutput.mBufferSize - outOffset);
+ }
+
+ ASSERT(outOffset < rOutput.mBufferSize); // first check should have sorted this -- merely logic check
+
+ return outOffset;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::DecodeChunk(const void *, int, void *, int)
+//		Purpose: Decode an encoded chunk -- use OutputBufferSizeForKnownOutputSize() to find
+//			 the output buffer size needed before calling.
+//			 The encoded data must have the same alignment as EncodeChunk()
+//			 requires for its output buffer.
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+int BackupStoreFile::DecodeChunk(const void *Encoded, int EncodedSize, void *Output, int OutputSize)
+{
+ // Check alignment of the encoded block
+ ASSERT((((uint32_t)(long)Encoded) % BACKUPSTOREFILE_CODING_BLOCKSIZE) == BACKUPSTOREFILE_CODING_OFFSET);
+
+ // First check
+ if(EncodedSize < 1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadEncodedChunk)
+ }
+
+ const uint8_t *input = (uint8_t*)Encoded;
+
+ // Get header, make checks, etc
+ uint8_t header = input[0];
+ bool chunkCompressed = (header & HEADER_CHUNK_IS_COMPRESSED) == HEADER_CHUNK_IS_COMPRESSED;
+ uint8_t encodingType = (header >> HEADER_ENCODING_SHIFT);
+ if(encodingType != HEADER_BLOWFISH_ENCODING && encodingType != HEADER_AES_ENCODING)
+ {
+ THROW_EXCEPTION(BackupStoreException, ChunkHasUnknownEncoding)
+ }
+
+#ifndef HAVE_OLD_SSL
+ // Choose cipher
+ CipherContext &cipher((encodingType == HEADER_AES_ENCODING)?sAESDecrypt:sBlowfishDecrypt);
+#else
+ // AES not supported with this version of OpenSSL
+ if(encodingType == HEADER_AES_ENCODING)
+ {
+ THROW_EXCEPTION(BackupStoreException, AEScipherNotSupportedByInstalledOpenSSL)
+ }
+ CipherContext &cipher(sBlowfishDecrypt);
+#endif
+
+ // Check enough space for header, an IV and one byte of input
+ int ivLen = cipher.GetIVLength();
+ if(EncodedSize < (1 + ivLen + 1))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadEncodedChunk)
+ }
+
+ // Set IV in decrypt context, and start
+ cipher.SetIV(input + 1);
+ cipher.Begin();
+
+ // Setup vars for code
+ int inOffset = 1 + ivLen;
+ uint8_t *output = (uint8_t*)Output;
+ int outOffset = 0;
+
+ // Do action
+ if(chunkCompressed)
+ {
+ // Do things in chunks
+ uint8_t buffer[2048];
+ int inputBlockLen = cipher.InSizeForOutBufferSize(sizeof(buffer));
+
+ // Decompressor
+ Compress<false> decompress;
+
+ while(inOffset < EncodedSize)
+ {
+ // Decrypt a block
+ int bl = inputBlockLen;
+ if(bl > (EncodedSize - inOffset)) bl = EncodedSize - inOffset; // not too long
+ int s = cipher.Transform(buffer, sizeof(buffer), input + inOffset, bl);
+ inOffset += bl;
+
+ // Decompress the decrypted data
+ if(s > 0)
+ {
+ decompress.Input(buffer, s);
+ int os = 0;
+ do
+ {
+ os = decompress.Output(output + outOffset, OutputSize - outOffset);
+ outOffset += os;
+ } while(os > 0);
+
+ // Check that there's space left in the output buffer -- there always should be
+ if(outOffset >= OutputSize)
+ {
+ THROW_EXCEPTION(BackupStoreException, NotEnoughSpaceToDecodeChunk)
+ }
+ }
+ }
+
+ // Get any compressed data remaining in the cipher context and compression
+ int s = cipher.Final(buffer, sizeof(buffer));
+ decompress.Input(buffer, s);
+ decompress.FinishInput();
+ while(!decompress.OutputHasFinished())
+ {
+ int os = decompress.Output(output + outOffset, OutputSize - outOffset);
+ outOffset += os;
+
+ // Check that there's space left in the output buffer -- there always should be
+ if(outOffset >= OutputSize)
+ {
+ THROW_EXCEPTION(BackupStoreException, NotEnoughSpaceToDecodeChunk)
+ }
+ }
+ }
+ else
+ {
+ // Easy decryption
+ outOffset += cipher.Transform(output + outOffset, OutputSize - outOffset, input + inOffset, EncodedSize - inOffset);
+ outOffset += cipher.Final(output + outOffset, OutputSize - outOffset);
+ }
+
+ return outOffset;
+}
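+
+// Illustrative round trip through EncodeChunk()/DecodeChunk() above (a sketch, assuming
+// the encryption keys have already been set with SetBlowfishKeys(), and optionally
+// SetAESKey()). EncodingBuffer provides the alignment EncodeChunk() requires for its
+// output, which also satisfies DecodeChunk()'s input alignment requirement.
+//
+//	void ExampleChunkRoundTrip(const void *pChunk, int ChunkSize)
+//	{
+//		// Encode into an aligned buffer large enough for the worst case
+//		BackupStoreFile::EncodingBuffer encoded;
+//		encoded.Allocate(BackupStoreFile::MaxBlockSizeForChunkSize(ChunkSize));
+//		int encodedSize = BackupStoreFile::EncodeChunk(pChunk, ChunkSize, encoded);
+//
+//		// Decode again -- the output buffer needs a little extra room for overheads,
+//		// but (unlike the encoded input) no special alignment
+//		int outSize = BackupStoreFile::OutputBufferSizeForKnownOutputSize(ChunkSize);
+//		uint8_t *pClear = (uint8_t *)::malloc(outSize);
+//		int clearSize = BackupStoreFile::DecodeChunk(encoded.mpBuffer, encodedSize,
+//			pClear, outSize);
+//		ASSERT(clearSize == ChunkSize);
+//		::free(pClear);
+//	}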
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::ReorderFileToStreamOrder(IOStream *, bool)
+//		Purpose: Returns a stream which gives a stream-order version of the encoded file.
+// If TakeOwnership == true, then the input stream will be deleted when the
+// returned stream is deleted.
+// The input stream must be seekable.
+// Created: 10/12/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<IOStream> BackupStoreFile::ReorderFileToStreamOrder(IOStream *pStream, bool TakeOwnership)
+{
+ ASSERT(pStream != 0);
+
+ // Get the size of the file
+ int64_t fileSize = pStream->BytesLeftToRead();
+ if(fileSize == IOStream::SizeOfStreamUnknown)
+ {
+ THROW_EXCEPTION(BackupStoreException, StreamDoesntHaveRequiredFeatures)
+ }
+
+ // Read the header
+ int bytesRead = 0;
+ file_StreamFormat hdr;
+ bool readBlock = pStream->ReadFullBuffer(&hdr, sizeof(hdr), &bytesRead);
+
+ // Seek backwards to put the file pointer back where it was before we started this
+ pStream->Seek(0 - bytesRead, IOStream::SeekType_Relative);
+
+ // Check we got a block
+ if(!readBlock)
+ {
+ // Couldn't read header -- assume file bad
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Check magic number
+ if(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ && ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V0
+#endif
+ )
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Get number of blocks
+ int64_t numBlocks = box_ntoh64(hdr.mNumBlocks);
+
+ // Calculate where the block index will be, check it's reasonable
+ int64_t blockIndexSize = ((numBlocks * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader));
+ int64_t blockIndexLoc = fileSize - blockIndexSize;
+ if(blockIndexLoc < 0)
+ {
+ // Doesn't look good!
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Build a reordered stream
+ std::auto_ptr<IOStream> reordered(new ReadGatherStream(TakeOwnership));
+
+ // Set it up...
+ ReadGatherStream &rreordered(*((ReadGatherStream*)reordered.get()));
+ int component = rreordered.AddComponent(pStream);
+ // Send out the block index
+ rreordered.AddBlock(component, blockIndexSize, true, blockIndexLoc);
+ // And then the rest of the file
+ rreordered.AddBlock(component, blockIndexLoc, true, 0);
+
+ return reordered;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::ResetStats()
+// Purpose: Reset the gathered statistics
+// Created: 20/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::ResetStats()
+{
+ msStats.mBytesInEncodedFiles = 0;
+ msStats.mBytesAlreadyOnServer = 0;
+ msStats.mTotalFileStreamSize = 0;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFile::CompareFileContentsAgainstBlockIndex(const char *, IOStream &, int)
+// Purpose: Compares the contents of a file against the checksums contained in the
+// block index. Returns true if the checksums match, meaning the file is
+// extremely likely to match the original. Will always consume the entire index.
+// Created: 21/1/04
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFile::CompareFileContentsAgainstBlockIndex(const char *Filename, IOStream &rBlockIndex, int Timeout)
+{
+ // is it a symlink?
+ bool sourceIsSymlink = false;
+ {
+ EMU_STRUCT_STAT st;
+ if(EMU_LSTAT(Filename, &st) == -1)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+ if((st.st_mode & S_IFMT) == S_IFLNK)
+ {
+ sourceIsSymlink = true;
+ }
+ }
+
+ // Open file, if it's not a symlink
+ std::auto_ptr<FileStream> in;
+ if(!sourceIsSymlink)
+ {
+ in.reset(new FileStream(Filename));
+ }
+
+ // Read header
+ file_BlockIndexHeader hdr;
+ if(!rBlockIndex.ReadFullBuffer(&hdr, sizeof(hdr), 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Check magic
+ if(hdr.mMagicValue != (int32_t)htonl(OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1)
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ && hdr.mMagicValue != (int32_t)htonl(OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V0)
+#endif
+ )
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ bool isOldVersion = hdr.mMagicValue == (int32_t)htonl(OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V0);
+#endif
+
+ // Get basic information
+ int64_t numBlocks = box_ntoh64(hdr.mNumBlocks);
+ uint64_t entryIVBase = box_ntoh64(hdr.mEntryIVBase);
+
+ //TODO: Verify that these sizes look reasonable
+
+ // setup
+ void *data = 0;
+ int32_t dataSize = -1;
+ bool matches = true;
+ int64_t totalSizeInBlockIndex = 0;
+
+ try
+ {
+ for(int64_t b = 0; b < numBlocks; ++b)
+ {
+ // Read an entry from the stream
+ file_BlockIndexEntry entry;
+ if(!rBlockIndex.ReadFullBuffer(&entry, sizeof(entry), 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ // Couldn't read entry
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Calculate IV for this entry
+ uint64_t iv = entryIVBase;
+ iv += b;
+ iv = box_hton64(iv);
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ if(isOldVersion)
+ {
+ // Reverse the IV for compatibility
+ iv = box_swap64(iv);
+ }
+#endif
+ sBlowfishDecryptBlockEntry.SetIV(&iv);
+
+ // Decrypt the encrypted section
+ file_BlockIndexEntryEnc entryEnc;
+ int sectionSize = sBlowfishDecryptBlockEntry.TransformBlock(&entryEnc, sizeof(entryEnc),
+ entry.mEnEnc, sizeof(entry.mEnEnc));
+ if(sectionSize != sizeof(entryEnc))
+ {
+ THROW_EXCEPTION(BackupStoreException, BlockEntryEncodingDidntGiveExpectedLength)
+ }
+
+ // Size of block
+ int32_t blockClearSize = ntohl(entryEnc.mSize);
+ if(blockClearSize < 0 || blockClearSize > (BACKUP_FILE_MAX_BLOCK_SIZE + 1024))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ totalSizeInBlockIndex += blockClearSize;
+
+ // Make sure there's enough memory allocated to load the block in
+ if(dataSize < blockClearSize)
+ {
+ // Too small, free the block if it's already allocated
+ if(data != 0)
+ {
+ ::free(data);
+ data = 0;
+ }
+ // Allocate a block
+ data = ::malloc(blockClearSize + 128);
+ if(data == 0)
+ {
+ throw std::bad_alloc();
+ }
+ dataSize = blockClearSize + 128;
+ }
+
+ // Load in the block from the file, if it's not a symlink
+ if(!sourceIsSymlink)
+ {
+ if(in->Read(data, blockClearSize) != blockClearSize)
+ {
+ // Not enough data left in the file, can't possibly match
+ matches = false;
+ }
+ else
+ {
+ // Check the checksum
+ MD5Digest md5;
+ md5.Add(data, blockClearSize);
+ md5.Finish();
+ if(!md5.DigestMatches(entryEnc.mStrongChecksum))
+ {
+ // Checksum didn't match
+ matches = false;
+ }
+ }
+ }
+
+ // Keep on going regardless, to make sure the entire block index stream is read
+ // -- must always be consistent about what happens with the stream.
+ }
+ }
+ catch(...)
+ {
+ // clean up in case of errors
+ if(data != 0)
+ {
+ ::free(data);
+ data = 0;
+ }
+ throw;
+ }
+
+ // free block
+ if(data != 0)
+ {
+ ::free(data);
+ data = 0;
+ }
+
+ // Check for data left over if it's not a symlink
+ if(!sourceIsSymlink)
+ {
+ // Anything left to read in the file?
+ if(in->BytesLeftToRead() != 0)
+ {
+ // File has extra data at the end
+ matches = false;
+ }
+ }
+
+ // Symlinks must have zero size on server
+ if(sourceIsSymlink)
+ {
+ matches = (totalSizeInBlockIndex == 0);
+ }
+
+ return matches;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::EncodingBuffer::EncodingBuffer()
+// Purpose: Constructor
+// Created: 25/11/04
+//
+// --------------------------------------------------------------------------
+BackupStoreFile::EncodingBuffer::EncodingBuffer()
+ : mpBuffer(0),
+ mBufferSize(0)
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::EncodingBuffer::~EncodingBuffer()
+// Purpose: Destructor
+// Created: 25/11/04
+//
+// --------------------------------------------------------------------------
+BackupStoreFile::EncodingBuffer::~EncodingBuffer()
+{
+ if(mpBuffer != 0)
+ {
+ BackupStoreFile::CodingChunkFree(mpBuffer);
+ mpBuffer = 0;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::EncodingBuffer::Allocate(int)
+// Purpose: Do initial allocation of block
+// Created: 25/11/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::EncodingBuffer::Allocate(int Size)
+{
+ ASSERT(mpBuffer == 0);
+ uint8_t *buffer = (uint8_t*)BackupStoreFile::CodingChunkAlloc(Size);
+ if(buffer == 0)
+ {
+ throw std::bad_alloc();
+ }
+ mpBuffer = buffer;
+ mBufferSize = Size;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::EncodingBuffer::Reallocate(int)
+//		Purpose: Reallocate the block. Avoid calling this where possible: the entire
+//			 contents have to be copied, as the block can't simply be resized in place.
+// Created: 25/11/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::EncodingBuffer::Reallocate(int NewSize)
+{
+ BOX_TRACE("Reallocating EncodingBuffer from " << mBufferSize <<
+ " to " << NewSize);
+ ASSERT(mpBuffer != 0);
+ uint8_t *buffer = (uint8_t*)BackupStoreFile::CodingChunkAlloc(NewSize);
+ if(buffer == 0)
+ {
+ throw std::bad_alloc();
+ }
+ // Copy data
+ ::memcpy(buffer, mpBuffer, (NewSize > mBufferSize)?mBufferSize:NewSize);
+
+ // Free old
+ BackupStoreFile::CodingChunkFree(mpBuffer);
+
+ // Store new buffer
+ mpBuffer = buffer;
+ mBufferSize = NewSize;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    DiffTimer::DiffTimer()
+// Purpose: Constructor
+// Created: 2005/02/01
+//
+// --------------------------------------------------------------------------
+DiffTimer::DiffTimer()
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    DiffTimer::~DiffTimer()
+//		Purpose: Destructor
+// Created: 2005/02/01
+//
+// --------------------------------------------------------------------------
+DiffTimer::~DiffTimer()
+{
+}
diff --git a/lib/backupstore/BackupStoreFile.h b/lib/backupstore/BackupStoreFile.h
new file mode 100644
index 00000000..7c72e010
--- /dev/null
+++ b/lib/backupstore/BackupStoreFile.h
@@ -0,0 +1,234 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFile.h
+// Purpose: Utils for manipulating files
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREFILE__H
+#define BACKUPSTOREFILE__H
+
+#include <cstdlib>
+#include <memory>
+
+#include "BackupClientFileAttributes.h"
+#include "BackupStoreFilename.h"
+#include "IOStream.h"
+#include "ReadLoggingStream.h"
+
+typedef struct
+{
+ int64_t mBytesInEncodedFiles;
+ int64_t mBytesAlreadyOnServer;
+ int64_t mTotalFileStreamSize;
+} BackupStoreFileStats;
+
+class RunStatusProvider;
+
+// Uncomment to disable backwards compatibility
+//#define BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+
+
+// Output buffer to EncodeChunk and input data to DecodeChunk must
+// have specific alignment, see function comments.
+#define BACKUPSTOREFILE_CODING_BLOCKSIZE 16
+#define BACKUPSTOREFILE_CODING_OFFSET 15
+
+// Have some memory allocation commands, note closing "Off" at end of file.
+#include "MemLeakFindOn.h"
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: DiffTimer
+// Purpose: Interface for classes that can keep track of diffing time,
+// and send SSL keepalive messages
+// Created: 2006/01/19
+//
+// --------------------------------------------------------------------------
+class DiffTimer
+{
+public:
+ DiffTimer();
+ virtual ~DiffTimer();
+public:
+ virtual void DoKeepAlive() = 0;
+ virtual int GetMaximumDiffingTime() = 0;
+ virtual bool IsManaged() = 0;
+};
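+
+// A minimal implementation might look like the sketch below (the keepalive action and
+// the 600 second limit are placeholder examples, not values from this codebase):
+//
+//	class ExampleDiffTimer : public DiffTimer
+//	{
+//	public:
+//		virtual void DoKeepAlive()           { /* send a protocol keepalive */ }
+//		virtual int  GetMaximumDiffingTime() { return 600; }
+//		virtual bool IsManaged()             { return true; /* enforce the limit */ }
+//	};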
+
+class BackupStoreFileEncodeStream;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupStoreFile
+// Purpose: Class to hold together utils for manipulating files.
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+class BackupStoreFile
+{
+public:
+ class DecodedStream : public IOStream
+ {
+ friend class BackupStoreFile;
+ private:
+ DecodedStream(IOStream &rEncodedFile, int Timeout);
+ DecodedStream(const DecodedStream &); // not allowed
+ DecodedStream &operator=(const DecodedStream &); // not allowed
+ public:
+ ~DecodedStream();
+
+ // Stream functions
+ virtual int Read(void *pBuffer, int NBytes, int Timeout);
+ virtual void Write(const void *pBuffer, int NBytes);
+ virtual bool StreamDataLeft();
+ virtual bool StreamClosed();
+
+ // Accessor functions
+ const BackupClientFileAttributes &GetAttributes() {return mAttributes;}
+ const BackupStoreFilename &GetFilename() {return mFilename;}
+ int64_t GetNumBlocks() {return mNumBlocks;} // primarily for tests
+
+ bool IsSymLink();
+
+ private:
+ void Setup(const BackupClientFileAttributes *pAlterativeAttr);
+ void ReadBlockIndex(bool MagicAlreadyRead);
+
+ private:
+ IOStream &mrEncodedFile;
+ int mTimeout;
+ BackupClientFileAttributes mAttributes;
+ BackupStoreFilename mFilename;
+ int64_t mNumBlocks;
+ void *mpBlockIndex;
+ uint8_t *mpEncodedData;
+ uint8_t *mpClearData;
+ int mClearDataSize;
+ int mCurrentBlock;
+ int mCurrentBlockClearSize;
+ int mPositionInCurrentBlock;
+ uint64_t mEntryIVBase;
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ bool mIsOldVersion;
+#endif
+ };
+
+
+ // Main interface
+ static std::auto_ptr<BackupStoreFileEncodeStream> EncodeFile
+ (
+ const std::string& Filename,
+ int64_t ContainerID, const BackupStoreFilename &rStoreFilename,
+ int64_t *pModificationTime = 0,
+ ReadLoggingStream::Logger* pLogger = NULL,
+ RunStatusProvider* pRunStatusProvider = NULL
+ );
+ static std::auto_ptr<BackupStoreFileEncodeStream> EncodeFileDiff
+ (
+ const std::string& Filename, int64_t ContainerID,
+ const BackupStoreFilename &rStoreFilename,
+ int64_t DiffFromObjectID, IOStream &rDiffFromBlockIndex,
+ int Timeout,
+ DiffTimer *pDiffTimer,
+ int64_t *pModificationTime = 0,
+ bool *pIsCompletelyDifferent = 0
+ );
+ static bool VerifyEncodedFileFormat(IOStream &rFile, int64_t *pDiffFromObjectIDOut = 0, int64_t *pContainerIDOut = 0);
+ static void CombineFile(IOStream &rDiff, IOStream &rDiff2, IOStream &rFrom, IOStream &rOut);
+ static void CombineDiffs(IOStream &rDiff1, IOStream &rDiff2, IOStream &rDiff2b, IOStream &rOut);
+ static void ReverseDiffFile(IOStream &rDiff, IOStream &rFrom, IOStream &rFrom2, IOStream &rOut, int64_t ObjectIDOfFrom, bool *pIsCompletelyDifferent = 0);
+ static void DecodeFile(IOStream &rEncodedFile, const char *DecodedFilename, int Timeout, const BackupClientFileAttributes *pAlterativeAttr = 0);
+ static std::auto_ptr<BackupStoreFile::DecodedStream> DecodeFileStream(IOStream &rEncodedFile, int Timeout, const BackupClientFileAttributes *pAlterativeAttr = 0);
+ static bool CompareFileContentsAgainstBlockIndex(const char *Filename, IOStream &rBlockIndex, int Timeout);
+ static std::auto_ptr<IOStream> CombineFileIndices(IOStream &rDiff, IOStream &rFrom, bool DiffIsIndexOnly = false, bool FromIsIndexOnly = false);
+
+ // Stream manipulation
+ static std::auto_ptr<IOStream> ReorderFileToStreamOrder(IOStream *pStream, bool TakeOwnership);
+ static void MoveStreamPositionToBlockIndex(IOStream &rStream);
+
+ // Crypto setup
+ static void SetBlowfishKeys(const void *pKey, int KeyLength, const void *pBlockEntryKey, int BlockEntryKeyLength);
+#ifndef HAVE_OLD_SSL
+ static void SetAESKey(const void *pKey, int KeyLength);
+#endif
+
+	// Allocation of properly aligned chunks for decoding and encoding
+ inline static void *CodingChunkAlloc(int Size)
+ {
+ uint8_t *a = (uint8_t*)malloc((Size) + (BACKUPSTOREFILE_CODING_BLOCKSIZE * 3));
+ if(a == 0) return 0;
+ // Align to main block size
+ ASSERT(sizeof(uint64_t) >= sizeof(void*)); // make sure casting the right pointer size
+ uint8_t adjustment = BACKUPSTOREFILE_CODING_BLOCKSIZE
+ - (uint8_t)(((uint64_t)a) % BACKUPSTOREFILE_CODING_BLOCKSIZE);
+ uint8_t *b = (a + adjustment);
+ // Store adjustment
+ *b = adjustment;
+ // Return offset
+ return b + BACKUPSTOREFILE_CODING_OFFSET;
+ }
+ inline static void CodingChunkFree(void *Block)
+ {
+ // Check alignment is as expected
+ ASSERT(sizeof(uint64_t) >= sizeof(void*)); // make sure casting the right pointer size
+ ASSERT((uint8_t)(((uint64_t)Block) % BACKUPSTOREFILE_CODING_BLOCKSIZE) == BACKUPSTOREFILE_CODING_OFFSET);
+ uint8_t *a = (uint8_t*)Block;
+ a -= BACKUPSTOREFILE_CODING_OFFSET;
+ // Adjust downwards...
+ a -= *a;
+ free(a);
+ }
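+
+	// The layout produced by CodingChunkAlloc() above: malloc() returns 'a'; 'b' is the
+	// next BACKUPSTOREFILE_CODING_BLOCKSIZE boundary strictly above 'a', and the byte at
+	// 'b' stores the adjustment so that CodingChunkFree() can recover 'a'. The caller is
+	// given b + BACKUPSTOREFILE_CODING_OFFSET, which is therefore always OFFSET bytes past
+	// a BLOCKSIZE boundary -- the property asserted by EncodeChunk(), DecodeChunk() and
+	// CodingChunkFree().
+	//
+	//	a .... b ................. (returned pointer) ....
+	//	       ^ adjustment byte   ^ (address % BLOCKSIZE) == OFFSET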
+
+ static void DiffTimerExpired();
+
+ // Building blocks
+ class EncodingBuffer
+ {
+ public:
+ EncodingBuffer();
+ ~EncodingBuffer();
+ private:
+ // No copying
+ EncodingBuffer(const EncodingBuffer &);
+ EncodingBuffer &operator=(const EncodingBuffer &);
+ public:
+ void Allocate(int Size);
+ void Reallocate(int NewSize);
+
+ uint8_t *mpBuffer;
+ int mBufferSize;
+ };
+ static int MaxBlockSizeForChunkSize(int ChunkSize);
+ static int EncodeChunk(const void *Chunk, int ChunkSize, BackupStoreFile::EncodingBuffer &rOutput);
+
+	// The caller should already know how big the decoded output will be, but must allocate
+	// a little extra memory to cover the overheads allowed for in the decoding checks
+ static inline int OutputBufferSizeForKnownOutputSize(int KnownChunkSize)
+ {
+ // Plenty big enough
+ return KnownChunkSize + 256;
+ }
+ static int DecodeChunk(const void *Encoded, int EncodedSize, void *Output, int OutputSize);
+
+	// Statistics, not designed to be completely reliable
+ static void ResetStats();
+ static BackupStoreFileStats msStats;
+
+ // For debug
+#ifndef BOX_RELEASE_BUILD
+ static bool TraceDetailsOfDiffProcess;
+#endif
+
+	// For debugging: dump information about an encoded file
+ static void DumpFile(void *clibFileHandle, bool ToTrace, IOStream &rFile);
+};
+
+#include "MemLeakFindOff.h"
+
+#endif // BACKUPSTOREFILE__H
diff --git a/lib/backupstore/BackupStoreFileCmbDiff.cpp b/lib/backupstore/BackupStoreFileCmbDiff.cpp
new file mode 100644
index 00000000..1a88fa3f
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileCmbDiff.cpp
@@ -0,0 +1,326 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileCmbDiff.cpp
+// Purpose: Combine two diffs together
+// Created: 12/7/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <new>
+#include <stdlib.h>
+
+#include "BackupStoreFile.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreObjectMagic.h"
+#include "BackupStoreException.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreFilename.h"
+
+#include "MemLeakFindOn.h"
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFile::CombineDiffs(IOStream &, IOStream &, IOStream &, IOStream &)
+//		Purpose: Given two diffs, combine them into a single diff which, combined with the
+//			 original file, produces the result of applying rDiff1 and then rDiff2.
+//			 rDiff2 and rDiff2b must be two independent streams open on the same
+//			 second diff file.
+// Created: 12/7/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::CombineDiffs(IOStream &rDiff1, IOStream &rDiff2, IOStream &rDiff2b, IOStream &rOut)
+{
+ // Skip header of first diff, record where the data starts, and skip to the index
+ int64_t diff1DataStarts = 0;
+ {
+ // Read the header for the From file
+ file_StreamFormat diff1Hdr;
+ if(!rDiff1.ReadFullBuffer(&diff1Hdr, sizeof(diff1Hdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(diff1Hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ // Skip over the filename and attributes of the From file
+ // BLOCK
+ {
+ BackupStoreFilename filename2;
+ filename2.ReadFromStream(rDiff1, IOStream::TimeOutInfinite);
+ int32_t size_s;
+ if(!rDiff1.ReadFullBuffer(&size_s, sizeof(size_s), 0 /* not interested in bytes read if this fails */))
+ {
+ THROW_EXCEPTION(CommonException, StreamableMemBlockIncompleteRead)
+ }
+ int size = ntohl(size_s);
+ // Skip forward the size
+ rDiff1.Seek(size, IOStream::SeekType_Relative);
+ }
+ // Record position
+ diff1DataStarts = rDiff1.GetPosition();
+ // Skip to index
+ rDiff1.Seek(0 - (((box_ntoh64(diff1Hdr.mNumBlocks)) * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader)), IOStream::SeekType_End);
+ }
+
+ // Read the index of the first diff
+ // Header first
+ file_BlockIndexHeader diff1IdxHdr;
+ if(!rDiff1.ReadFullBuffer(&diff1IdxHdr, sizeof(diff1IdxHdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ if(ntohl(diff1IdxHdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ int64_t diff1NumBlocks = box_ntoh64(diff1IdxHdr.mNumBlocks);
+ // Allocate some memory
+ int64_t *diff1BlockStartPositions = (int64_t*)::malloc((diff1NumBlocks + 1) * sizeof(int64_t));
+ if(diff1BlockStartPositions == 0)
+ {
+ throw std::bad_alloc();
+ }
+
+ // Buffer data
+ void *buffer = 0;
+ int bufferSize = 0;
+
+ try
+ {
+		// Then the entries:
+		// For each entry, we want to know whether it's in this file, and if so, how big it is.
+		// We'll store this as an array of file positions within the file, with an additional
+		// entry on the end so that the length of the last block can be worked out.
+		// If an entry isn't in this file, store the non-positive value from the index
+		// (0 minus the block number in the other file) instead.
+ int64_t diff1Position = diff1DataStarts;
+ for(int64_t b = 0; b < diff1NumBlocks; ++b)
+ {
+ file_BlockIndexEntry e;
+ if(!rDiff1.ReadFullBuffer(&e, sizeof(e), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Where's the block?
+ int64_t blockEn = box_ntoh64(e.mEncodedSize);
+ if(blockEn <= 0)
+ {
+ // Just store the negated block number
+ diff1BlockStartPositions[b] = blockEn;
+ }
+ else
+ {
+ // Block is present in this file
+ diff1BlockStartPositions[b] = diff1Position;
+ diff1Position += blockEn;
+ }
+ }
+
+		// Finish off the list, so the last entry can have its size calculated.
+ diff1BlockStartPositions[diff1NumBlocks] = diff1Position;
+
+ // Now read the second diff's header, copying it to the out file
+ file_StreamFormat diff2Hdr;
+ if(!rDiff2.ReadFullBuffer(&diff2Hdr, sizeof(diff2Hdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(diff2Hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ // Copy
+ rOut.Write(&diff2Hdr, sizeof(diff2Hdr));
+ // Copy over filename and attributes
+ // BLOCK
+ {
+ BackupStoreFilename filename;
+ filename.ReadFromStream(rDiff2, IOStream::TimeOutInfinite);
+ filename.WriteToStream(rOut);
+ StreamableMemBlock attr;
+ attr.ReadFromStream(rDiff2, IOStream::TimeOutInfinite);
+ attr.WriteToStream(rOut);
+ }
+
+ // Get to the index of rDiff2b, and read the header
+ MoveStreamPositionToBlockIndex(rDiff2b);
+ file_BlockIndexHeader diff2IdxHdr;
+ if(!rDiff2b.ReadFullBuffer(&diff2IdxHdr, sizeof(diff2IdxHdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ if(ntohl(diff2IdxHdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ int64_t diff2NumBlocks = box_ntoh64(diff2IdxHdr.mNumBlocks);
+ int64_t diff2IndexEntriesStart = rDiff2b.GetPosition();
+
+ // Then read all the entries
+ int64_t diff2FilePosition = rDiff2.GetPosition();
+ for(int64_t b = 0; b < diff2NumBlocks; ++b)
+ {
+ file_BlockIndexEntry e;
+ if(!rDiff2b.ReadFullBuffer(&e, sizeof(e), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+			// What to do next about copying data
+ bool copyBlock = false;
+ int copySize = 0;
+ int64_t copyFrom = 0;
+ bool fromFileDiff1 = false;
+
+ // Where's the block?
+ int64_t blockEn = box_ntoh64(e.mEncodedSize);
+ if(blockEn > 0)
+ {
+ // Block is present in this file -- copy to out
+ copyBlock = true;
+ copyFrom = diff2FilePosition;
+ copySize = (int)blockEn;
+
+ // Move pointer onwards
+ diff2FilePosition += blockEn;
+ }
+ else
+ {
+ // Block isn't present here -- is it present in the old one?
+ int64_t blockIndex = 0 - blockEn;
+ if(blockIndex < 0 || blockIndex > diff1NumBlocks)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ if(diff1BlockStartPositions[blockIndex] > 0)
+ {
+ // Block is in the old diff file, copy it across
+ copyBlock = true;
+ copyFrom = diff1BlockStartPositions[blockIndex];
+ int nb = blockIndex + 1;
+ while(diff1BlockStartPositions[nb] <= 0)
+ {
+ // This is safe, because the last entry will terminate it properly!
+ ++nb;
+ ASSERT(nb <= diff1NumBlocks);
+ }
+ copySize = diff1BlockStartPositions[nb] - copyFrom;
+ fromFileDiff1 = true;
+ }
+ }
+ //TRACE4("%d %d %lld %d\n", copyBlock, copySize, copyFrom, fromFileDiff1);
+
+ // Copy data to the output file?
+ if(copyBlock)
+ {
+ // Allocate enough space
+ if(bufferSize < copySize || buffer == 0)
+ {
+ // Free old block
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ buffer = 0;
+ bufferSize = 0;
+ }
+ // Allocate new block
+ buffer = ::malloc(copySize);
+ if(buffer == 0)
+ {
+ throw std::bad_alloc();
+ }
+ bufferSize = copySize;
+ }
+ ASSERT(bufferSize >= copySize);
+
+ // Load in the data
+ if(fromFileDiff1)
+ {
+ rDiff1.Seek(copyFrom, IOStream::SeekType_Absolute);
+ if(!rDiff1.ReadFullBuffer(buffer, copySize, 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ }
+ else
+ {
+ rDiff2.Seek(copyFrom, IOStream::SeekType_Absolute);
+ if(!rDiff2.ReadFullBuffer(buffer, copySize, 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ }
+ // Write out data
+ rOut.Write(buffer, copySize);
+ }
+ }
+
+ // Write the modified header
+ diff2IdxHdr.mOtherFileID = diff1IdxHdr.mOtherFileID;
+ rOut.Write(&diff2IdxHdr, sizeof(diff2IdxHdr));
+
+ // Then we'll write out the index, reading the data again
+ rDiff2b.Seek(diff2IndexEntriesStart, IOStream::SeekType_Absolute);
+ for(int64_t b = 0; b < diff2NumBlocks; ++b)
+ {
+ file_BlockIndexEntry e;
+ if(!rDiff2b.ReadFullBuffer(&e, sizeof(e), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Where's the block?
+ int64_t blockEn = box_ntoh64(e.mEncodedSize);
+
+ // If it's not in this file, it needs modification...
+ if(blockEn <= 0)
+ {
+ int64_t blockIndex = 0 - blockEn;
+ // In another file. Need to translate this against the other diff
+ if(diff1BlockStartPositions[blockIndex] > 0)
+ {
+ // Block is in the first diff file, stick in size
+ int nb = blockIndex + 1;
+ while(diff1BlockStartPositions[nb] <= 0)
+ {
+ // This is safe, because the last entry will terminate it properly!
+ ++nb;
+ ASSERT(nb <= diff1NumBlocks);
+ }
+ int64_t size = diff1BlockStartPositions[nb] - diff1BlockStartPositions[blockIndex];
+ e.mEncodedSize = box_hton64(size);
+ }
+ else
+ {
+ // Block in the original file, use translated value
+ e.mEncodedSize = box_hton64(diff1BlockStartPositions[blockIndex]);
+ }
+ }
+
+ // Write entry
+ rOut.Write(&e, sizeof(e));
+ }
+ }
+ catch(...)
+ {
+ // clean up
+ ::free(diff1BlockStartPositions);
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ }
+ throw;
+ }
+
+ // Clean up allocated memory
+ ::free(diff1BlockStartPositions);
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ }
+}
+
diff --git a/lib/backupstore/BackupStoreFileCmbIdx.cpp b/lib/backupstore/BackupStoreFileCmbIdx.cpp
new file mode 100644
index 00000000..c8bcc3b9
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileCmbIdx.cpp
@@ -0,0 +1,324 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileCmbIdx.cpp
+//		Purpose: Combine the indices of a delta file and the file it's a diff from.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <new>
+#include <string.h>
+
+#include "BackupStoreFile.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreObjectMagic.h"
+#include "BackupStoreException.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreFilename.h"
+
+#include "MemLeakFindOn.h"
+
+// Hide from outside world
+namespace
+{
+
+class BSFCombinedIndexStream : public IOStream
+{
+public:
+ BSFCombinedIndexStream(IOStream *pDiff);
+ ~BSFCombinedIndexStream();
+
+ virtual int Read(void *pBuffer, int NBytes, int Timeout = IOStream::TimeOutInfinite);
+ virtual void Write(const void *pBuffer, int NBytes);
+ virtual bool StreamDataLeft();
+ virtual bool StreamClosed();
+ virtual void Initialise(IOStream &rFrom);
+
+private:
+ IOStream *mpDiff;
+ bool mIsInitialised;
+ bool mHeaderWritten;
+ file_BlockIndexHeader mHeader;
+ int64_t mNumEntriesToGo;
+ int64_t mNumEntriesInFromFile;
+ int64_t *mFromBlockSizes; // NOTE: Entries in network byte order
+};
+
+};
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFile::CombineFileIndices(IOStream &, IOStream &, bool, bool)
+//		Purpose: Given a diff file and the file it's a diff from, return a stream from which
+//			 the index of the combined file can be read, without actually combining them.
+// The stream of the diff must have a lifetime greater than or equal to the
+// lifetime of the returned stream object. The full "from" file stream
+// only needs to exist during the actual function call.
+// If you pass in dodgy files which aren't related, then you will either
+// get an error or bad results. So don't do that.
+// If DiffIsIndexOnly is true, then rDiff is assumed to be a stream positioned
+// at the beginning of the block index. Similarly for FromIsIndexOnly.
+//			 WARNING: Reads of the returned stream with buffer sizes of less
+//			 than 64 bytes will not return any data.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<IOStream> BackupStoreFile::CombineFileIndices(IOStream &rDiff, IOStream &rFrom, bool DiffIsIndexOnly, bool FromIsIndexOnly)
+{
+ // Reposition file pointers?
+ if(!DiffIsIndexOnly)
+ {
+ MoveStreamPositionToBlockIndex(rDiff);
+ }
+ if(!FromIsIndexOnly)
+ {
+ MoveStreamPositionToBlockIndex(rFrom);
+ }
+
+ // Create object
+ std::auto_ptr<IOStream> stream(new BSFCombinedIndexStream(&rDiff));
+
+ // Initialise it
+ ((BSFCombinedIndexStream *)stream.get())->Initialise(rFrom);
+
+ // And return the stream
+ return stream;
+}
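+
+// Illustrative usage (a sketch -- the file names are hypothetical). The diff stream must
+// outlive the returned combined index stream; the "from" stream is only needed during
+// the call itself:
+//
+//	FileStream diff("diff.rf");
+//	FileStream from("original.rf");
+//	std::auto_ptr<IOStream> combined(
+//		BackupStoreFile::CombineFileIndices(diff, from));
+//	char buffer[4096];
+//	while(combined->StreamDataLeft())
+//	{
+//		int bytes = combined->Read(buffer, sizeof(buffer), IOStream::TimeOutInfinite);
+//		// ... use 'bytes' bytes of the combined block index ...
+//	}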
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BSFCombinedIndexStream::BSFCombinedIndexStream()
+// Purpose: Private class. Constructor.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+BSFCombinedIndexStream::BSFCombinedIndexStream(IOStream *pDiff)
+ : mpDiff(pDiff),
+ mIsInitialised(false),
+ mHeaderWritten(false),
+ mNumEntriesToGo(0),
+ mNumEntriesInFromFile(0),
+ mFromBlockSizes(0)
+{
+ ASSERT(mpDiff != 0);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BSFCombinedIndexStream::~BSFCombinedIndexStream()
+// Purpose: Private class. Destructor.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+BSFCombinedIndexStream::~BSFCombinedIndexStream()
+{
+ if(mFromBlockSizes != 0)
+ {
+ ::free(mFromBlockSizes);
+ mFromBlockSizes = 0;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BSFCombinedIndexStream::Initialise(IOStream &)
+//		Purpose: Private class. Initialise from the streams (diff passed in constructor).
+// Both streams must have file pointer positioned at the block index.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+void BSFCombinedIndexStream::Initialise(IOStream &rFrom)
+{
+ // Paranoia is good.
+ if(mIsInitialised)
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ // Look at the diff file: Read in the header
+ if(!mpDiff->ReadFullBuffer(&mHeader, sizeof(mHeader), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ if(ntohl(mHeader.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Read relevant data.
+ mNumEntriesToGo = box_ntoh64(mHeader.mNumBlocks);
+
+ // Adjust a bit to reflect the fact it's no longer a diff
+ mHeader.mOtherFileID = box_hton64(0);
+
+ // Now look at the from file: Read header
+ file_BlockIndexHeader fromHdr;
+ if(!rFrom.ReadFullBuffer(&fromHdr, sizeof(fromHdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ if(ntohl(fromHdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Then... allocate memory for the list of sizes
+ mNumEntriesInFromFile = box_ntoh64(fromHdr.mNumBlocks);
+ mFromBlockSizes = (int64_t*)::malloc(mNumEntriesInFromFile * sizeof(int64_t));
+ if(mFromBlockSizes == 0)
+ {
+ throw std::bad_alloc();
+ }
+
+ // And read them all in!
+ for(int64_t b = 0; b < mNumEntriesInFromFile; ++b)
+ {
+ file_BlockIndexEntry e;
+ if(!rFrom.ReadFullBuffer(&e, sizeof(e), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Check that the from file isn't a delta in itself
+ if(box_ntoh64(e.mEncodedSize) <= 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, OnCombineFromFileIsIncomplete)
+ }
+
+ // Store size (in network byte order)
+ mFromBlockSizes[b] = e.mEncodedSize;
+ }
+
+ // Flag as initialised
+ mIsInitialised = true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BSFCombinedIndexStream::Read(void *, int, int)
+// Purpose: Private class. As interface.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+int BSFCombinedIndexStream::Read(void *pBuffer, int NBytes, int Timeout)
+{
+ // Paranoia is good.
+ if(!mIsInitialised || mFromBlockSizes == 0 || mpDiff == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ int written = 0;
+
+ // Header output yet?
+ if(!mHeaderWritten)
+ {
+ // Enough space?
+ if(NBytes < (int)sizeof(mHeader)) return 0;
+
+ // Copy in
+ ::memcpy(pBuffer, &mHeader, sizeof(mHeader));
+ NBytes -= sizeof(mHeader);
+ written += sizeof(mHeader);
+
+ // Flag it's done
+ mHeaderWritten = true;
+ }
+
+ // How many entries can be written?
+ int entriesToWrite = NBytes / sizeof(file_BlockIndexEntry);
+ if(entriesToWrite > mNumEntriesToGo)
+ {
+ entriesToWrite = mNumEntriesToGo;
+ }
+
+ // Setup ready to go
+ file_BlockIndexEntry *poutput = (file_BlockIndexEntry*)(((uint8_t*)pBuffer) + written);
+
+ // Write entries
+ for(int b = 0; b < entriesToWrite; ++b)
+ {
+ if(!mpDiff->ReadFullBuffer(&(poutput[b]), sizeof(file_BlockIndexEntry), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Does this need adjusting?
+ int s = box_ntoh64(poutput[b].mEncodedSize);
+ if(s <= 0)
+ {
+ // A reference to a block in the from file
+ int block = 0 - s;
+ ASSERT(block >= 0);
+ if(block >= mNumEntriesInFromFile)
+ {
+ // That's not good, the block doesn't exist
+ THROW_EXCEPTION(BackupStoreException, OnCombineFromFileIsIncomplete)
+ }
+
+ // Adjust the entry in the buffer
+ poutput[b].mEncodedSize = mFromBlockSizes[block]; // stored in network byte order, no translation necessary
+ }
+ }
+
+ // Update written count
+ written += entriesToWrite * sizeof(file_BlockIndexEntry);
+ mNumEntriesToGo -= entriesToWrite;
+
+ return written;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BSFCombinedIndexStream::Write(const void *, int)
+// Purpose: Private class. As interface.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+void BSFCombinedIndexStream::Write(const void *pBuffer, int NBytes)
+{
+ THROW_EXCEPTION(BackupStoreException, StreamDoesntHaveRequiredFeatures)
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BSFCombinedIndexStream::StreamDataLeft()
+// Purpose: Private class. As interface
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+bool BSFCombinedIndexStream::StreamDataLeft()
+{
+ return (!mHeaderWritten) || (mNumEntriesToGo > 0);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BSFCombinedIndexStream::StreamClosed()
+// Purpose: Private class. As interface.
+// Created: 8/7/04
+//
+// --------------------------------------------------------------------------
+bool BSFCombinedIndexStream::StreamClosed()
+{
+ return true; // doesn't do writing
+}
+
diff --git a/lib/backupstore/BackupStoreFileCombine.cpp b/lib/backupstore/BackupStoreFileCombine.cpp
new file mode 100644
index 00000000..baa331f0
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileCombine.cpp
@@ -0,0 +1,410 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileCombine.cpp
+// Purpose: File combining for BackupStoreFile
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <new>
+
+#include "BackupStoreFile.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreObjectMagic.h"
+#include "BackupStoreException.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreFilename.h"
+#include "FileStream.h"
+
+#include "MemLeakFindOn.h"
+
+typedef struct
+{
+ int64_t mFilePosition;
+} FromIndexEntry;
+
+static void LoadFromIndex(IOStream &rFrom, FromIndexEntry *pIndex, int64_t NumEntries);
+static void CopyData(IOStream &rDiffData, IOStream &rDiffIndex, int64_t DiffNumBlocks, IOStream &rFrom, FromIndexEntry *pFromIndex, int64_t FromNumBlocks, IOStream &rOut);
+static void WriteNewIndex(IOStream &rDiff, int64_t DiffNumBlocks, FromIndexEntry *pFromIndex, int64_t FromNumBlocks, IOStream &rOut);
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::CombineFile(IOStream &, IOStream &, IOStream &)
+// Purpose: Where rDiff is a store file which is incomplete as a result of a
+// diffing operation, rFrom is the file it is diffed from, and
+// rOut is the stream in which to place the result, the old file
+// and new file are combined into a file containing all the data.
+// rDiff2 is the same file as rDiff, opened again to get two
+// independent streams to the same file.
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::CombineFile(IOStream &rDiff, IOStream &rDiff2, IOStream &rFrom, IOStream &rOut)
+{
+ // Read and copy the header.
+ file_StreamFormat hdr;
+ if(!rDiff.ReadFullBuffer(&hdr, sizeof(hdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ // Copy
+ rOut.Write(&hdr, sizeof(hdr));
+ // Copy over filename and attributes
+ // BLOCK
+ {
+ BackupStoreFilename filename;
+ filename.ReadFromStream(rDiff, IOStream::TimeOutInfinite);
+ filename.WriteToStream(rOut);
+ StreamableMemBlock attr;
+ attr.ReadFromStream(rDiff, IOStream::TimeOutInfinite);
+ attr.WriteToStream(rOut);
+ }
+
+ // Read the header for the From file
+ file_StreamFormat fromHdr;
+ if(!rFrom.ReadFullBuffer(&fromHdr, sizeof(fromHdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(fromHdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ // Skip over the filename and attributes of the From file
+ // BLOCK
+ {
+ BackupStoreFilename filename2;
+ filename2.ReadFromStream(rFrom, IOStream::TimeOutInfinite);
+ int32_t size_s;
+ if(!rFrom.ReadFullBuffer(&size_s, sizeof(size_s), 0 /* not interested in bytes read if this fails */))
+ {
+ THROW_EXCEPTION(CommonException, StreamableMemBlockIncompleteRead)
+ }
+ int size = ntohl(size_s);
+ // Skip forward the size
+ rFrom.Seek(size, IOStream::SeekType_Relative);
+ }
+
+ // Allocate memory for the block index of the From file
+ int64_t fromNumBlocks = box_ntoh64(fromHdr.mNumBlocks);
+ // NOTE: An extra entry is required so that the length of the last block can be calculated
+ FromIndexEntry *pFromIndex = (FromIndexEntry*)::malloc((fromNumBlocks+1) * sizeof(FromIndexEntry));
+ if(pFromIndex == 0)
+ {
+ throw std::bad_alloc();
+ }
+
+ try
+ {
+ // Load the index from the From file, calculating the offsets in the
+ // file as we go along, and enforce that everything should be present.
+ LoadFromIndex(rFrom, pFromIndex, fromNumBlocks);
+
+ // Read in the block index of the Diff file in small chunks, and output data
+ // for each block, either from this file, or the other file.
+ int64_t diffNumBlocks = box_ntoh64(hdr.mNumBlocks);
+ CopyData(rDiff /* positioned at start of data */, rDiff2, diffNumBlocks, rFrom, pFromIndex, fromNumBlocks, rOut);
+
+ // Read in the block index again, and output the new block index, simply
+ // filling in the sizes of blocks from the old file.
+ WriteNewIndex(rDiff, diffNumBlocks, pFromIndex, fromNumBlocks, rOut);
+
+ // Free buffers
+ ::free(pFromIndex);
+ pFromIndex = 0;
+ }
+ catch(...)
+ {
+ // Clean up
+ if(pFromIndex != 0)
+ {
+ ::free(pFromIndex);
+ pFromIndex = 0;
+ }
+ throw;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: static LoadFromIndex(IOStream &, FromIndexEntry *, int64_t)
+// Purpose: Static. Load the index from the From file
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+static void LoadFromIndex(IOStream &rFrom, FromIndexEntry *pIndex, int64_t NumEntries)
+{
+ ASSERT(pIndex != 0);
+ ASSERT(NumEntries >= 0);
+
+ // Get the starting point in the file
+ int64_t filePos = rFrom.GetPosition();
+
+ // Jump to the end of the file to read the index
+ rFrom.Seek(0 - ((NumEntries * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader)), IOStream::SeekType_End);
+
+ // Read block index header
+ file_BlockIndexHeader blkhdr;
+ if(!rFrom.ReadFullBuffer(&blkhdr, sizeof(blkhdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(blkhdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1
+ || (int64_t)box_ntoh64(blkhdr.mNumBlocks) != NumEntries)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // And then the block entries
+ for(int64_t b = 0; b < NumEntries; ++b)
+ {
+ // Read
+ file_BlockIndexEntry en;
+ if(!rFrom.ReadFullBuffer(&en, sizeof(en), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+
+ // Add to list
+ pIndex[b].mFilePosition = filePos;
+
+ // Encoded size?
+ int64_t encodedSize = box_ntoh64(en.mEncodedSize);
+ // Check that the block is actually there
+ if(encodedSize <= 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, OnCombineFromFileIsIncomplete)
+ }
+
+ // Move file pointer on
+ filePos += encodedSize;
+ }
+
+	// Store the position in the very last entry, so the size of the last block can be calculated
+ pIndex[NumEntries].mFilePosition = filePos;
+}
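+
+// Illustrative sketch (added for clarity, not part of the original source):
+// how the extra sentinel entry stored above lets a caller work out the size
+// of any block, including the last one, from adjacent file positions alone.
+// Guarded out of compilation; it only assumes the FromIndexEntry struct
+// defined earlier in this file.
+#if 0
+static int64_t ExampleFromBlockSize(const FromIndexEntry *pIndex, int64_t BlockIdx)
+{
+	// pIndex[BlockIdx + 1] is always valid, because LoadFromIndex fills in
+	// NumEntries + 1 positions -- the final one marks the end of the data.
+	return pIndex[BlockIdx + 1].mFilePosition - pIndex[BlockIdx].mFilePosition;
+}
+#endif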
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: static CopyData(IOStream &, IOStream &, int64_t, IOStream &, FromIndexEntry *, int64_t, IOStream &)
+// Purpose: Static. Copy data from the Diff and From file to the out file.
+// rDiffData is at beginning of data.
+// rDiffIndex at any position.
+// rFrom is at any position.
+// rOut is after the header, ready for data
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+static void CopyData(IOStream &rDiffData, IOStream &rDiffIndex, int64_t DiffNumBlocks,
+ IOStream &rFrom, FromIndexEntry *pFromIndex, int64_t FromNumBlocks, IOStream &rOut)
+{
+ // Jump to the end of the diff file to read the index
+ rDiffIndex.Seek(0 - ((DiffNumBlocks * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader)), IOStream::SeekType_End);
+
+ // Read block index header
+ file_BlockIndexHeader diffBlkhdr;
+ if(!rDiffIndex.ReadFullBuffer(&diffBlkhdr, sizeof(diffBlkhdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(diffBlkhdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1
+ || (int64_t)box_ntoh64(diffBlkhdr.mNumBlocks) != DiffNumBlocks)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Record where the From file is
+ int64_t fromPos = rFrom.GetPosition();
+
+ // Buffer data
+ void *buffer = 0;
+ int bufferSize = 0;
+
+ try
+ {
+ // Read the blocks in!
+ for(int64_t b = 0; b < DiffNumBlocks; ++b)
+ {
+ // Read
+ file_BlockIndexEntry en;
+ if(!rDiffIndex.ReadFullBuffer(&en, sizeof(en), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+
+			// What's the size value stored in the entry?
+ int64_t encodedSize = box_ntoh64(en.mEncodedSize);
+
+ // How much data will be read?
+ int32_t blockSize = 0;
+ if(encodedSize > 0)
+ {
+ // The block is actually in the diff file
+ blockSize = encodedSize;
+ }
+ else
+ {
+ // It's in the from file. First, check to see if it's valid
+ int64_t blockIdx = (0 - encodedSize);
+				if(blockIdx >= FromNumBlocks)
+ {
+ // References a block which doesn't actually exist
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ // Calculate size. This operation is safe because of the extra entry at the end
+ blockSize = pFromIndex[blockIdx + 1].mFilePosition - pFromIndex[blockIdx].mFilePosition;
+ }
+ ASSERT(blockSize > 0);
+
+ // Make sure there's memory available to copy this
+ if(bufferSize < blockSize || buffer == 0)
+ {
+ // Free old block
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ buffer = 0;
+ bufferSize = 0;
+ }
+ // Allocate new block
+ buffer = ::malloc(blockSize);
+ if(buffer == 0)
+ {
+ throw std::bad_alloc();
+ }
+ bufferSize = blockSize;
+ }
+ ASSERT(bufferSize >= blockSize);
+
+ // Load in data from one of the files
+ if(encodedSize > 0)
+ {
+ // Load from diff file
+ if(!rDiffData.ReadFullBuffer(buffer, blockSize, 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ }
+ else
+ {
+ // Locate and read the data from the from file
+ int64_t blockIdx = (0 - encodedSize);
+ // Seek if necessary
+ if(fromPos != pFromIndex[blockIdx].mFilePosition)
+ {
+ rFrom.Seek(pFromIndex[blockIdx].mFilePosition, IOStream::SeekType_Absolute);
+ fromPos = pFromIndex[blockIdx].mFilePosition;
+ }
+ // Read
+ if(!rFrom.ReadFullBuffer(buffer, blockSize, 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+
+ // Update fromPos to current position
+ fromPos += blockSize;
+ }
+
+ // Write data to out file
+ rOut.Write(buffer, blockSize);
+ }
+
+ // Free buffer, if allocated
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ buffer = 0;
+ }
+ }
+ catch(...)
+ {
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ buffer = 0;
+ }
+ throw;
+ }
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: static WriteNewIndex(IOStream &, int64_t, FromIndexEntry *, int64_t, IOStream &)
+// Purpose: Write the index to the out file, just copying from the diff file and
+// adjusting the entries.
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+static void WriteNewIndex(IOStream &rDiff, int64_t DiffNumBlocks, FromIndexEntry *pFromIndex, int64_t FromNumBlocks, IOStream &rOut)
+{
+ // Jump to the end of the diff file to read the index
+ rDiff.Seek(0 - ((DiffNumBlocks * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader)), IOStream::SeekType_End);
+
+ // Read block index header
+ file_BlockIndexHeader diffBlkhdr;
+ if(!rDiff.ReadFullBuffer(&diffBlkhdr, sizeof(diffBlkhdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(diffBlkhdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1
+ || (int64_t)box_ntoh64(diffBlkhdr.mNumBlocks) != DiffNumBlocks)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Write it out with a blanked out other file ID
+ diffBlkhdr.mOtherFileID = box_hton64(0);
+ rOut.Write(&diffBlkhdr, sizeof(diffBlkhdr));
+
+ // Rewrite the index
+ for(int64_t b = 0; b < DiffNumBlocks; ++b)
+ {
+ file_BlockIndexEntry en;
+ if(!rDiff.ReadFullBuffer(&en, sizeof(en), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+
+		// What's the size value stored in the entry?
+ int64_t encodedSize = box_ntoh64(en.mEncodedSize);
+
+ // Need to adjust it?
+ if(encodedSize <= 0)
+ {
+ // This actually refers to a block in the from file. So rewrite this.
+ int64_t blockIdx = (0 - encodedSize);
+			if(blockIdx >= FromNumBlocks)
+ {
+ // References a block which doesn't actually exist
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ // Calculate size. This operation is safe because of the extra entry at the end
+ int32_t blockSize = pFromIndex[blockIdx + 1].mFilePosition - pFromIndex[blockIdx].mFilePosition;
+ // Then replace entry
+ en.mEncodedSize = box_hton64(((uint64_t)blockSize));
+ }
+
+ // Write entry
+ rOut.Write(&en, sizeof(en));
+ }
+}
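+
+// Illustrative sketch (added for clarity, not part of the original source):
+// the sign convention that CopyData() and WriteNewIndex() both rely on. A
+// positive mEncodedSize means the block's data is stored in the diff file
+// itself; a value <= 0 is the negated index of a block in the "from" file.
+// Guarded out of compilation.
+#if 0
+static bool ExampleEntryRefersToFromFile(int64_t EncodedSize, int64_t &rFromBlockIndexOut)
+{
+	if(EncodedSize > 0)
+	{
+		// Data for this block is stored inline in the diff file
+		return false;
+	}
+	// Reference to a block in the from file (block 0 is stored as 0)
+	rFromBlockIndexOut = 0 - EncodedSize;
+	return true;
+}
+#endif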
+
+
+
+
+
diff --git a/lib/backupstore/BackupStoreFileCryptVar.cpp b/lib/backupstore/BackupStoreFileCryptVar.cpp
new file mode 100644
index 00000000..e826de4e
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileCryptVar.cpp
@@ -0,0 +1,31 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileCryptVar.cpp
+// Purpose: Cryptographic keys for backup store files
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include "BackupStoreFileCryptVar.h"
+#include "BackupStoreFileWire.h"
+
+#include "MemLeakFindOn.h"
+
+CipherContext BackupStoreFileCryptVar::sBlowfishEncrypt;
+CipherContext BackupStoreFileCryptVar::sBlowfishDecrypt;
+
+#ifndef HAVE_OLD_SSL
+ CipherContext BackupStoreFileCryptVar::sAESEncrypt;
+ CipherContext BackupStoreFileCryptVar::sAESDecrypt;
+#endif
+
+// Default to blowfish
+CipherContext *BackupStoreFileCryptVar::spEncrypt = &BackupStoreFileCryptVar::sBlowfishEncrypt;
+uint8_t BackupStoreFileCryptVar::sEncryptCipherType = HEADER_BLOWFISH_ENCODING;
+
+CipherContext BackupStoreFileCryptVar::sBlowfishEncryptBlockEntry;
+CipherContext BackupStoreFileCryptVar::sBlowfishDecryptBlockEntry;
+
diff --git a/lib/backupstore/BackupStoreFileCryptVar.h b/lib/backupstore/BackupStoreFileCryptVar.h
new file mode 100644
index 00000000..566813c8
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileCryptVar.h
@@ -0,0 +1,39 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileCryptVar.h
+// Purpose: Cryptographic keys for backup store files
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREFILECRYPTVAR__H
+#define BACKUPSTOREFILECRYPTVAR__H
+
+#include "CipherContext.h"
+
+// Hide private static variables from the rest of the world by putting them
+// as static variables in a namespace.
+// -- don't put them as static class variables to avoid openssl/evp.h being
+// included all over the project.
+namespace BackupStoreFileCryptVar
+{
+ // Keys for the main file data
+ extern CipherContext sBlowfishEncrypt;
+ extern CipherContext sBlowfishDecrypt;
+ // Use AES when available
+#ifndef HAVE_OLD_SSL
+ extern CipherContext sAESEncrypt;
+ extern CipherContext sAESDecrypt;
+#endif
+ // How encoding will be done
+ extern CipherContext *spEncrypt;
+ extern uint8_t sEncryptCipherType;
+
+	// Keys for the block indices
+ extern CipherContext sBlowfishEncryptBlockEntry;
+ extern CipherContext sBlowfishDecryptBlockEntry;
+}
+
+#endif // BACKUPSTOREFILECRYPTVAR__H
+
diff --git a/lib/backupstore/BackupStoreFileDiff.cpp b/lib/backupstore/BackupStoreFileDiff.cpp
new file mode 100644
index 00000000..fa8cb892
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileDiff.cpp
@@ -0,0 +1,1047 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileDiff.cpp
+// Purpose: Functions relating to diffing BackupStoreFiles
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <string.h>
+
+#include <new>
+#include <map>
+
+#ifdef HAVE_TIME_H
+ #include <time.h>
+#elif HAVE_SYS_TIME_H
+ #include <sys/time.h>
+#endif
+
+#include "BackupStoreConstants.h"
+#include "BackupStoreException.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreFileCryptVar.h"
+#include "BackupStoreFileEncodeStream.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreObjectMagic.h"
+#include "CommonException.h"
+#include "FileStream.h"
+#include "MD5Digest.h"
+#include "RollingChecksum.h"
+#include "Timer.h"
+
+#include "MemLeakFindOn.h"
+
+#include <cstring>
+
+using namespace BackupStoreFileCryptVar;
+using namespace BackupStoreFileCreation;
+
+// By default, don't trace out details of the diff as we go along -- it would fill up the logs significantly.
+// But it's useful for the tests.
+#ifndef BOX_RELEASE_BUILD
+ bool BackupStoreFile::TraceDetailsOfDiffProcess = false;
+#endif
+
+static void LoadIndex(IOStream &rBlockIndex, int64_t ThisID, BlocksAvailableEntry **ppIndex, int64_t &rNumBlocksOut, int Timeout, bool &rCanDiffFromThis);
+static void FindMostUsedSizes(BlocksAvailableEntry *pIndex, int64_t NumBlocks, int32_t Sizes[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES]);
+static void SearchForMatchingBlocks(IOStream &rFile,
+ std::map<int64_t, int64_t> &rFoundBlocks, BlocksAvailableEntry *pIndex,
+ int64_t NumBlocks, int32_t Sizes[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES],
+ DiffTimer *pDiffTimer);
+static void SetupHashTable(BlocksAvailableEntry *pIndex, int64_t NumBlocks, int32_t BlockSize, BlocksAvailableEntry **pHashTable);
+static bool SecondStageMatch(BlocksAvailableEntry *pFirstInHashList, RollingChecksum &fastSum, uint8_t *pBeginnings, uint8_t *pEndings, int Offset, int32_t BlockSize, int64_t FileBlockNumber,
+BlocksAvailableEntry *pIndex, std::map<int64_t, int64_t> &rFoundBlocks);
+static void GenerateRecipe(BackupStoreFileEncodeStream::Recipe &rRecipe, BlocksAvailableEntry *pIndex, int64_t NumBlocks, std::map<int64_t, int64_t> &rFoundBlocks, int64_t SizeOfInputFile);
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::MoveStreamPositionToBlockIndex(IOStream &)
+// Purpose: Move the file pointer in this stream to just before the block index.
+// Assumes that the stream is at the beginning, seekable, and
+// reading from the stream is OK.
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::MoveStreamPositionToBlockIndex(IOStream &rStream)
+{
+ // Size of file
+ int64_t fileSize = rStream.BytesLeftToRead();
+
+ // Get header
+ file_StreamFormat hdr;
+
+ // Read the header
+ if(!rStream.ReadFullBuffer(&hdr, sizeof(hdr), 0 /* not interested in bytes read if this fails */, IOStream::TimeOutInfinite))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Check magic number
+ if(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ && ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V0
+#endif
+ )
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Work out where the index is
+ int64_t numBlocks = box_ntoh64(hdr.mNumBlocks);
+ int64_t blockHeaderPosFromEnd = ((numBlocks * sizeof(file_BlockIndexEntry)) + sizeof(file_BlockIndexHeader));
+
+ // Sanity check
+ if(blockHeaderPosFromEnd > static_cast<int64_t>(fileSize - sizeof(file_StreamFormat)))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Seek to that position
+ rStream.Seek(0 - blockHeaderPosFromEnd, IOStream::SeekType_End);
+
+ // Done. Stream now in right position (as long as the file is formatted correctly)
+}
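+
+// Worked example (added for clarity, not part of the original source): if
+// sizeof(file_BlockIndexEntry) were 40 and sizeof(file_BlockIndexHeader)
+// were 32 (hypothetical figures -- the real sizes come from
+// BackupStoreFileWire.h), a file with 10 blocks would have its block index
+// starting (10 * 40) + 32 = 432 bytes before the end of the stream, so the
+// call above would be equivalent to Seek(-432, SeekType_End).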
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFile::EncodeFileDiff(const std::string &, int64_t, const BackupStoreFilename &, int64_t, IOStream &, int, DiffTimer *, int64_t *, bool *)
+// Purpose: Similar to EncodeFile, but takes the object ID of the file it's
+// diffing from, and the index of the blocks in a stream. It'll then
+// calculate which blocks can be reused from that old file.
+// The timeout is the timeout value for reading the diff block index.
+//		If pIsCompletelyDifferent != 0, it will be set to true if the
+//		two files are completely different (do not share any blocks), false otherwise.
+//
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupStoreFileEncodeStream> BackupStoreFile::EncodeFileDiff
+(
+ const std::string& Filename, int64_t ContainerID,
+ const BackupStoreFilename &rStoreFilename, int64_t DiffFromObjectID,
+ IOStream &rDiffFromBlockIndex, int Timeout, DiffTimer *pDiffTimer,
+ int64_t *pModificationTime, bool *pIsCompletelyDifferent)
+{
+ // Is it a symlink?
+ {
+ EMU_STRUCT_STAT st;
+ if(EMU_LSTAT(Filename.c_str(), &st) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+ if((st.st_mode & S_IFLNK) == S_IFLNK)
+ {
+ // Don't do diffs for symlinks
+ if(pIsCompletelyDifferent != 0)
+ {
+ *pIsCompletelyDifferent = true;
+ }
+ return EncodeFile(Filename, ContainerID, rStoreFilename, pModificationTime);
+ }
+ }
+
+ // Load in the blocks
+ BlocksAvailableEntry *pindex = 0;
+ int64_t blocksInIndex = 0;
+ bool canDiffFromThis = false;
+ LoadIndex(rDiffFromBlockIndex, DiffFromObjectID, &pindex, blocksInIndex, Timeout, canDiffFromThis);
+ // BOX_TRACE("Diff: Blocks in index: " << blocksInIndex);
+
+ if(!canDiffFromThis)
+ {
+ // Don't do diffing...
+ if(pIsCompletelyDifferent != 0)
+ {
+ *pIsCompletelyDifferent = true;
+ }
+ return EncodeFile(Filename, ContainerID, rStoreFilename, pModificationTime);
+ }
+
+ // Pointer to recipe we're going to create
+ BackupStoreFileEncodeStream::Recipe *precipe = 0;
+
+ try
+ {
+ // Find which sizes should be scanned
+ int32_t sizesToScan[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES];
+ FindMostUsedSizes(pindex, blocksInIndex, sizesToScan);
+
+ // Flag for reporting to the user
+ bool completelyDifferent;
+
+ // BLOCK
+ {
+ // Search the file to find matching blocks
+ std::map<int64_t, int64_t> foundBlocks; // map of offset in file to index in block index
+ int64_t sizeOfInputFile = 0;
+ // BLOCK
+ {
+ FileStream file(Filename);
+ // Get size of file
+ sizeOfInputFile = file.BytesLeftToRead();
+ // Find all those lovely matching blocks
+ SearchForMatchingBlocks(file, foundBlocks, pindex,
+ blocksInIndex, sizesToScan, pDiffTimer);
+
+ // Is it completely different?
+ completelyDifferent = (foundBlocks.size() == 0);
+ }
+
+ // Create a recipe -- if the two files are completely different, don't put the from file ID in the recipe.
+ precipe = new BackupStoreFileEncodeStream::Recipe(pindex, blocksInIndex, completelyDifferent?(0):(DiffFromObjectID));
+ BlocksAvailableEntry *pindexKeptRef = pindex; // we need this later, but must set pindex == 0 now, because of exceptions
+ pindex = 0; // Recipe now has ownership
+
+ // Fill it in
+ GenerateRecipe(*precipe, pindexKeptRef, blocksInIndex, foundBlocks, sizeOfInputFile);
+ }
+ // foundBlocks no longer required
+
+ // Create the stream
+ std::auto_ptr<BackupStoreFileEncodeStream> stream(
+ new BackupStoreFileEncodeStream);
+
+ // Do the initial setup
+ stream->Setup(Filename, precipe, ContainerID, rStoreFilename, pModificationTime);
+ precipe = 0; // Stream has taken ownership of this
+
+ // Tell user about completely different status?
+ if(pIsCompletelyDifferent != 0)
+ {
+ *pIsCompletelyDifferent = completelyDifferent;
+ }
+
+ // Return the stream for the caller
+ return stream;
+ }
+ catch(...)
+ {
+ // cleanup
+ if(pindex != 0)
+ {
+ ::free(pindex);
+ pindex = 0;
+ }
+ if(precipe != 0)
+ {
+ delete precipe;
+ precipe = 0;
+ }
+ throw;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    static LoadIndex(IOStream &, int64_t, BlocksAvailableEntry **, int64_t &, int, bool &)
+//		Purpose: Read in an index, decrypt it, and store it in the in-memory block format.
+// rCanDiffFromThis is set to false if the version of the from file is too old.
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+static void LoadIndex(IOStream &rBlockIndex, int64_t ThisID, BlocksAvailableEntry **ppIndex, int64_t &rNumBlocksOut, int Timeout, bool &rCanDiffFromThis)
+{
+ // Reset
+ rNumBlocksOut = 0;
+ rCanDiffFromThis = false;
+
+ // Read header
+ file_BlockIndexHeader hdr;
+ if(!rBlockIndex.ReadFullBuffer(&hdr, sizeof(hdr), 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ // Couldn't read header
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+	// Check against backwards compatibility stuff
+ if(hdr.mMagicValue == (int32_t)htonl(OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V0))
+ {
+ // Won't diff against old version
+
+ // Absorb rest of stream
+ char buffer[2048];
+ while(rBlockIndex.StreamDataLeft())
+ {
+ rBlockIndex.Read(buffer, sizeof(buffer), 1000 /* 1 sec timeout */);
+ }
+
+ // Tell caller
+ rCanDiffFromThis = false;
+ return;
+ }
+#endif
+
+ // Check magic
+ if(hdr.mMagicValue != (int32_t)htonl(OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Check that we're not trying to diff against a file which references blocks from another file
+ if(((int64_t)box_ntoh64(hdr.mOtherFileID)) != 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, CannotDiffAnIncompleteStoreFile)
+ }
+
+ // Mark as an acceptable diff.
+ rCanDiffFromThis = true;
+
+ // Get basic information
+ int64_t numBlocks = box_ntoh64(hdr.mNumBlocks);
+ uint64_t entryIVBase = box_ntoh64(hdr.mEntryIVBase);
+
+ //TODO: Verify that these sizes look reasonable
+
+ // Allocate space for the index
+ BlocksAvailableEntry *pindex = (BlocksAvailableEntry*)::malloc(sizeof(BlocksAvailableEntry) * numBlocks);
+ if(pindex == 0)
+ {
+ throw std::bad_alloc();
+ }
+
+ try
+ {
+ for(int64_t b = 0; b < numBlocks; ++b)
+ {
+ // Read an entry from the stream
+ file_BlockIndexEntry entry;
+ if(!rBlockIndex.ReadFullBuffer(&entry, sizeof(entry), 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ // Couldn't read entry
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Calculate IV for this entry
+ uint64_t iv = entryIVBase;
+ iv += b;
+ // Network byte order
+ iv = box_hton64(iv);
+ sBlowfishDecryptBlockEntry.SetIV(&iv);
+
+ // Decrypt the encrypted section
+ file_BlockIndexEntryEnc entryEnc;
+ int sectionSize = sBlowfishDecryptBlockEntry.TransformBlock(&entryEnc, sizeof(entryEnc),
+ entry.mEnEnc, sizeof(entry.mEnEnc));
+ if(sectionSize != sizeof(entryEnc))
+ {
+ THROW_EXCEPTION(BackupStoreException, BlockEntryEncodingDidntGiveExpectedLength)
+ }
+
+ // Check that we're not trying to diff against a file which references blocks from another file
+ if(((int64_t)box_ntoh64(entry.mEncodedSize)) <= 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, CannotDiffAnIncompleteStoreFile)
+ }
+
+ // Store all the required information
+ pindex[b].mpNextInHashList = 0; // hash list not set up yet
+ pindex[b].mSize = ntohl(entryEnc.mSize);
+ pindex[b].mWeakChecksum = ntohl(entryEnc.mWeakChecksum);
+ ::memcpy(pindex[b].mStrongChecksum, entryEnc.mStrongChecksum, sizeof(pindex[b].mStrongChecksum));
+ }
+
+		// Store index pointer for caller
+ ASSERT(ppIndex != 0);
+ *ppIndex = pindex;
+
+ // Store number of blocks for caller
+ rNumBlocksOut = numBlocks;
+
+ }
+ catch(...)
+ {
+ // clean up and send the exception along its way
+ ::free(pindex);
+ throw;
+ }
+}
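+
+// Illustrative sketch (added for clarity, not part of the original source):
+// the per-entry IV derivation used in the loop above. Each block index entry
+// is decrypted with IV = (base IV from the index header) + (entry number),
+// converted to network byte order so the result is endian-independent.
+// Guarded out of compilation.
+#if 0
+static uint64_t ExampleEntryIV(uint64_t EntryIVBase, int64_t EntryNumber)
+{
+	uint64_t iv = EntryIVBase + EntryNumber;
+	// The address of this value is what gets passed to CipherContext::SetIV()
+	return box_hton64(iv);
+}
+#endif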
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: static FindMostUsedSizes(BlocksAvailableEntry *, int64_t, int32_t[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES])
+// Purpose: Finds the most commonly used block sizes in the index
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+static void FindMostUsedSizes(BlocksAvailableEntry *pIndex, int64_t NumBlocks, int32_t Sizes[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES])
+{
+ // Array for lengths
+ int64_t sizeCounts[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES];
+
+ // Set arrays to lots of zeros (= unused entries)
+ for(int l = 0; l < BACKUP_FILE_DIFF_MAX_BLOCK_SIZES; ++l)
+ {
+ Sizes[l] = 0;
+ sizeCounts[l] = 0;
+ }
+
+ // Array for collecting sizes
+ std::map<int32_t, int64_t> foundSizes;
+
+ // Run through blocks and make a count of the entries
+ for(int64_t b = 0; b < NumBlocks; ++b)
+ {
+ // Only if the block size is bigger than the minimum size we'll scan for
+ if(pIndex[b].mSize > BACKUP_FILE_DIFF_MIN_BLOCK_SIZE)
+ {
+ // Find entry?
+ std::map<int32_t, int64_t>::const_iterator f(foundSizes.find(pIndex[b].mSize));
+ if(f != foundSizes.end())
+ {
+ // Increment existing entry
+ foundSizes[pIndex[b].mSize] = foundSizes[pIndex[b].mSize] + 1;
+ }
+ else
+ {
+ // New entry
+ foundSizes[pIndex[b].mSize] = 1;
+ }
+ }
+ }
+
+ // Make the block sizes
+ for(std::map<int32_t, int64_t>::const_iterator i(foundSizes.begin()); i != foundSizes.end(); ++i)
+ {
+ // Find the position of the size in the array
+ for(int t = 0; t < BACKUP_FILE_DIFF_MAX_BLOCK_SIZES; ++t)
+ {
+ // Instead of sorting on the raw count of blocks,
+ // take the file area covered by this block size.
+ if(i->second * i->first > sizeCounts[t] * Sizes[t])
+ {
+				// Then this size belongs before this entry -- shuffle them up
+ for(int s = (BACKUP_FILE_DIFF_MAX_BLOCK_SIZES - 1); s >= t; --s)
+ {
+ Sizes[s] = Sizes[s-1];
+ sizeCounts[s] = sizeCounts[s-1];
+ }
+
+ // Insert this size
+ Sizes[t] = i->first;
+ sizeCounts[t] = i->second;
+
+ // Shouldn't do any more searching
+ break;
+ }
+ }
+ }
+
+ // trace the size table in debug builds
+#ifndef BOX_RELEASE_BUILD
+ if(BackupStoreFile::TraceDetailsOfDiffProcess)
+ {
+ for(int t = 0; t < BACKUP_FILE_DIFF_MAX_BLOCK_SIZES; ++t)
+ {
+ BOX_TRACE("Diff block size " << t << ": " <<
+ Sizes[t] << " (count = " <<
+ sizeCounts[t] << ")");
+ }
+ }
+#endif
+}
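+
+// Worked example (added for clarity, not part of the original source):
+// suppose the index holds 100 blocks of 4096 bytes and 150 blocks of 1024
+// bytes. The raw counts favour 1024, but the comparison above weighs the
+// file area covered: 100 * 4096 = 409600 against 150 * 1024 = 153600, so
+// 4096 is ranked first -- scanning for that size can potentially match far
+// more of the old file's data.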
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    static SearchForMatchingBlocks(IOStream &, std::map<int64_t, int64_t> &, BlocksAvailableEntry *, int64_t, int32_t[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES], DiffTimer *)
+// Purpose: Find the matching blocks within the file.
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+static void SearchForMatchingBlocks(IOStream &rFile, std::map<int64_t, int64_t> &rFoundBlocks,
+ BlocksAvailableEntry *pIndex, int64_t NumBlocks,
+ int32_t Sizes[BACKUP_FILE_DIFF_MAX_BLOCK_SIZES], DiffTimer *pDiffTimer)
+{
+ Timer maximumDiffingTime(0, "MaximumDiffingTime");
+
+ if(pDiffTimer && pDiffTimer->IsManaged())
+ {
+ maximumDiffingTime = Timer(pDiffTimer->GetMaximumDiffingTime() *
+ MILLI_SEC_IN_SEC, "MaximumDiffingTime");
+ }
+
+ std::map<int64_t, int32_t> goodnessOfFit;
+
+ // Allocate the hash lookup table
+ BlocksAvailableEntry **phashTable = (BlocksAvailableEntry **)::malloc(sizeof(BlocksAvailableEntry *) * (64*1024));
+
+ // Choose a size for the buffer, just a little bit more than the maximum block size
+ int32_t bufSize = Sizes[0];
+ for(int z = 1; z < BACKUP_FILE_DIFF_MAX_BLOCK_SIZES; ++z)
+ {
+ if(Sizes[z] > bufSize) bufSize = Sizes[z];
+ }
+ bufSize += 4;
+ ASSERT(bufSize > Sizes[0]);
+ ASSERT(bufSize > 0);
+ if(bufSize > (BACKUP_FILE_MAX_BLOCK_SIZE + 1024))
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+	// TODO: Because we read the file one scanned block size at a time,
+	// this is likely to be inefficient. It would probably be much better
+	// to calculate checksums for all block sizes in a single pass.
+
+ // Allocate the buffers.
+ uint8_t *pbuffer0 = (uint8_t *)::malloc(bufSize);
+ uint8_t *pbuffer1 = (uint8_t *)::malloc(bufSize);
+ try
+ {
+ // Check buffer allocation
+ if(pbuffer0 == 0 || pbuffer1 == 0 || phashTable == 0)
+ {
+ // If a buffer got allocated, it will be cleaned up in the catch block
+ throw std::bad_alloc();
+ }
+
+ // Flag to abort the run, if too many blocks are found -- avoid using
+ // huge amounts of processor time when files contain many similar blocks.
+ bool abortSearch = false;
+
+ // Search for each block size in turn
+ // NOTE: Do the smallest size first, so that the scheme for adding
+		// entries in the found list works as expected and replaces smaller blocks
+ // with larger blocks when it finds matches at the same offset in the file.
+ for(int s = BACKUP_FILE_DIFF_MAX_BLOCK_SIZES - 1; s >= 0; --s)
+ {
+ ASSERT(Sizes[s] <= bufSize);
+ BOX_TRACE("Diff pass " << s << ", for block size " <<
+ Sizes[s]);
+
+ // Check we haven't finished
+ if(Sizes[s] == 0)
+ {
+ // empty entry, try next size
+ continue;
+ }
+
+ // Set up the hash table entries
+ SetupHashTable(pIndex, NumBlocks, Sizes[s], phashTable);
+
+ // Shift file position to beginning
+ rFile.Seek(0, IOStream::SeekType_Absolute);
+
+ // Read first block
+ if(rFile.Read(pbuffer0, Sizes[s]) != Sizes[s])
+ {
+ // Size of file too short to match -- do next size
+ continue;
+ }
+
+ // Setup block pointers
+ uint8_t *beginnings = pbuffer0;
+ uint8_t *endings = pbuffer1;
+ int offset = 0;
+
+ // Calculate the first checksum, ready for rolling
+ RollingChecksum rolling(beginnings, Sizes[s]);
+
+ // Then roll, until the file is exhausted
+ int64_t fileBlockNumber = 0;
+ int64_t fileOffset = 0;
+ int rollOverInitialBytes = 0;
+ while(true)
+ {
+ if(maximumDiffingTime.HasExpired())
+ {
+ ASSERT(pDiffTimer != NULL);
+ BOX_INFO("MaximumDiffingTime reached - "
+ "suspending file diff");
+ abortSearch = true;
+ break;
+ }
+
+ if(pDiffTimer)
+ {
+ pDiffTimer->DoKeepAlive();
+ }
+
+ // Load in another block of data, and record how big it is
+ int bytesInEndings = rFile.Read(endings, Sizes[s]);
+ int tmp;
+
+ // Skip any bytes from a previous matched block
+ if(rollOverInitialBytes > 0 && offset < bytesInEndings)
+ {
+ int spaceLeft = bytesInEndings - offset;
+ int thisRoll = (rollOverInitialBytes > spaceLeft) ? spaceLeft : rollOverInitialBytes;
+
+ rolling.RollForwardSeveral(beginnings+offset, endings+offset, Sizes[s], thisRoll);
+
+ offset += thisRoll;
+ fileOffset += thisRoll;
+ rollOverInitialBytes -= thisRoll;
+
+ if(rollOverInitialBytes)
+ {
+ goto refresh;
+ }
+ }
+
+ if(goodnessOfFit.count(fileOffset))
+ {
+ tmp = goodnessOfFit[fileOffset];
+ }
+ else
+ {
+ tmp = 0;
+ }
+
+ if(tmp >= Sizes[s])
+ {
+ // Skip over bigger ready-matched blocks completely
+ rollOverInitialBytes = tmp;
+ int spaceLeft = bytesInEndings - offset;
+ int thisRoll = (rollOverInitialBytes > spaceLeft) ? spaceLeft : rollOverInitialBytes;
+
+ rolling.RollForwardSeveral(beginnings+offset, endings+offset, Sizes[s], thisRoll);
+
+ offset += thisRoll;
+ fileOffset += thisRoll;
+ rollOverInitialBytes -= thisRoll;
+
+ if(rollOverInitialBytes)
+ {
+ goto refresh;
+ }
+ }
+
+ while(offset < bytesInEndings)
+ {
+ // Is current checksum in hash list?
+ uint16_t hash = rolling.GetComponentForHashing();
+ if(phashTable[hash] != 0 && (goodnessOfFit.count(fileOffset) == 0 || goodnessOfFit[fileOffset] < Sizes[s]))
+ {
+ if(SecondStageMatch(phashTable[hash], rolling, beginnings, endings, offset, Sizes[s], fileBlockNumber, pIndex, rFoundBlocks))
+ {
+ BOX_TRACE("Found block match for " << hash << " of " << Sizes[s] << " bytes at offset " << fileOffset);
+ goodnessOfFit[fileOffset] = Sizes[s];
+
+ // Block matched, roll the checksum forward to the next block without doing
+ // any more comparisons, because these are pointless (as any more matches will be ignored when
+ // the recipe is generated) and just take up valuable processor time. Edge cases are
+ // especially nasty, using huge amounts of time and memory.
+ int skip = Sizes[s];
+ if(offset < bytesInEndings && skip > 0)
+ {
+ int spaceLeft = bytesInEndings - offset;
+ int thisRoll = (skip > spaceLeft) ? spaceLeft : skip;
+
+ rolling.RollForwardSeveral(beginnings+offset, endings+offset, Sizes[s], thisRoll);
+
+ offset += thisRoll;
+ fileOffset += thisRoll;
+ skip -= thisRoll;
+ }
+ // Not all the bytes necessary will have been skipped, so get them
+ // skipped after the next block is loaded.
+ rollOverInitialBytes = skip;
+
+ // End this loop, so the final byte isn't used again
+ break;
+ }
+ else
+ {
+ BOX_TRACE("False alarm match for " << hash << " of " << Sizes[s] << " bytes at offset " << fileOffset);
+ }
+
+ int64_t NumBlocksFound = static_cast<int64_t>(
+ rFoundBlocks.size());
+ int64_t MaxBlocksFound = NumBlocks *
+ BACKUP_FILE_DIFF_MAX_BLOCK_FIND_MULTIPLE;
+
+ if(NumBlocksFound > MaxBlocksFound)
+ {
+ abortSearch = true;
+ break;
+ }
+ }
+
+ // Roll checksum forward
+ rolling.RollForward(beginnings[offset], endings[offset], Sizes[s]);
+
+ // Increment offsets
+ ++offset;
+ ++fileOffset;
+ }
+
+ if(abortSearch) break;
+
+ refresh:
+ // Finished?
+ if(bytesInEndings != Sizes[s])
+ {
+ // No more data in file -- check the final block
+ // (Do a copy and paste of 5 lines of code instead of introducing a comparison for
+ // each byte of the file)
+ uint16_t hash = rolling.GetComponentForHashing();
+ if(phashTable[hash] != 0 && (goodnessOfFit.count(fileOffset) == 0 || goodnessOfFit[fileOffset] < Sizes[s]))
+ {
+ if(SecondStageMatch(phashTable[hash], rolling, beginnings, endings, offset, Sizes[s], fileBlockNumber, pIndex, rFoundBlocks))
+ {
+ goodnessOfFit[fileOffset] = Sizes[s];
+ }
+ }
+
+ // finish
+ break;
+ }
+
+ // Switch buffers, reset offset
+ beginnings = endings;
+ endings = (beginnings == pbuffer0)?(pbuffer1):(pbuffer0); // ie the other buffer
+ offset = 0;
+
+ // And count the blocks which have been done
+ ++fileBlockNumber;
+ }
+
+ if(abortSearch) break;
+ }
+
+ // Free buffers and hash table
+ ::free(pbuffer1);
+ pbuffer1 = 0;
+ ::free(pbuffer0);
+ pbuffer0 = 0;
+ ::free(phashTable);
+ phashTable = 0;
+ }
+ catch(...)
+ {
+ // Cleanup and throw
+ if(pbuffer1 != 0) ::free(pbuffer1);
+ if(pbuffer0 != 0) ::free(pbuffer0);
+ if(phashTable != 0) ::free(phashTable);
+ throw;
+ }
+
+#ifndef BOX_RELEASE_BUILD
+ if(BackupStoreFile::TraceDetailsOfDiffProcess)
+ {
+ // Trace out the found blocks in debug mode
+ BOX_TRACE("Diff: list of found blocks");
+ BOX_TRACE("======== ======== ======== ========");
+ BOX_TRACE(" Offset BlkIdx Size Movement");
+ for(std::map<int64_t, int64_t>::const_iterator i(rFoundBlocks.begin()); i != rFoundBlocks.end(); ++i)
+ {
+ int64_t orgLoc = 0;
+ for(int64_t b = 0; b < i->second; ++b)
+ {
+ orgLoc += pIndex[b].mSize;
+ }
+ BOX_TRACE(std::setw(8) << i->first << " " <<
+ std::setw(8) << i->second << " " <<
+ std::setw(8) << pIndex[i->second].mSize <<
+ " " <<
+ std::setw(8) << (i->first - orgLoc));
+ }
+ BOX_TRACE("======== ======== ======== ========");
+ }
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    static SetupHashTable(BlocksAvailableEntry *, int64_t, int32_t, BlocksAvailableEntry **)
+// Purpose: Set up the hash table ready for a scan
+// Created: 14/1/04
+//
+// --------------------------------------------------------------------------
+static void SetupHashTable(BlocksAvailableEntry *pIndex, int64_t NumBlocks, int32_t BlockSize, BlocksAvailableEntry **pHashTable)
+{
+ // Set all entries in the hash table to zero
+ ::memset(pHashTable, 0, (sizeof(BlocksAvailableEntry *) * (64*1024)));
+
+ // Scan through the blocks, building the hash table
+ for(int64_t b = 0; b < NumBlocks; ++b)
+ {
+ // Only look at the required block size
+ if(pIndex[b].mSize == BlockSize)
+ {
+ // Get the value under which to hash this entry
+ uint16_t hash = RollingChecksum::ExtractHashingComponent(pIndex[b].mWeakChecksum);
+
+ // Already present in table?
+ if(pHashTable[hash] != 0)
+ {
+ //BOX_TRACE("Another hash entry for " << hash << " found");
+ // Yes -- need to set the pointer in this entry to the current entry to build the linked list
+ pIndex[b].mpNextInHashList = pHashTable[hash];
+ }
+
+ // Put a pointer to this entry in the hash table
+ pHashTable[hash] = pIndex + b;
+ }
+ }
+}
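+
+// Illustrative note (added for clarity, not part of the original source):
+// collisions on the 16-bit hashing component are resolved by chaining. If
+// blocks 3 and 7 of the index both hash to 0x1234, then after the loop above
+//     pHashTable[0x1234] -> pIndex[7] -> pIndex[3] -> 0
+// (the most recently inserted entry is at the head of the list), and
+// SecondStageMatch() walks this list comparing full checksums.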
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    static bool SecondStageMatch(BlocksAvailableEntry *, RollingChecksum &, uint8_t *, uint8_t *, int, int32_t, int64_t, BlocksAvailableEntry *, std::map<int64_t, int64_t> &)
+// Purpose: When a match in the hash table is found, scan for second stage match using strong checksum.
+// Created: 14/1/04
+//
+// --------------------------------------------------------------------------
+static bool SecondStageMatch(BlocksAvailableEntry *pFirstInHashList, RollingChecksum &fastSum, uint8_t *pBeginnings, uint8_t *pEndings,
+ int Offset, int32_t BlockSize, int64_t FileBlockNumber, BlocksAvailableEntry *pIndex, std::map<int64_t, int64_t> &rFoundBlocks)
+{
+ // Check parameters
+ ASSERT(pBeginnings != 0);
+ ASSERT(pEndings != 0);
+ ASSERT(Offset >= 0);
+ ASSERT(BlockSize > 0);
+ ASSERT(pFirstInHashList != 0);
+ ASSERT(pIndex != 0);
+
+#ifndef BOX_RELEASE_BUILD
+ uint16_t DEBUG_Hash = fastSum.GetComponentForHashing();
+#endif
+ uint32_t Checksum = fastSum.GetChecksum();
+
+ // Before we go to the expense of the MD5, make sure it's a darn good match on the checksum we already know.
+ BlocksAvailableEntry *scan = pFirstInHashList;
+ bool found=false;
+ while(scan != 0)
+ {
+ if(scan->mWeakChecksum == Checksum)
+ {
+ found = true;
+ break;
+ }
+ scan = scan->mpNextInHashList;
+ }
+ if(!found)
+ {
+ return false;
+ }
+
+ // Calculate the strong MD5 digest for this block
+ MD5Digest strong;
+ // Add the data from the beginnings
+ strong.Add(pBeginnings + Offset, BlockSize - Offset);
+ // Add any data from the endings
+ if(Offset > 0)
+ {
+ strong.Add(pEndings, Offset);
+ }
+ strong.Finish();
+
+ // Then go through the entries in the hash list, comparing with the strong digest calculated
+ scan = pFirstInHashList;
+ //BOX_TRACE("second stage match");
+ while(scan != 0)
+ {
+ //BOX_TRACE("scan size " << scan->mSize <<
+ // ", block size " << BlockSize <<
+ // ", hash " << Hash);
+ ASSERT(scan->mSize == BlockSize);
+ ASSERT(RollingChecksum::ExtractHashingComponent(scan->mWeakChecksum) == DEBUG_Hash);
+
+ // Compare?
+ if(strong.DigestMatches(scan->mStrongChecksum))
+ {
+ //BOX_TRACE("Match!\n");
+ // Found! Add to list of found blocks...
+ int64_t fileOffset = (FileBlockNumber * BlockSize) + Offset;
+			int64_t blockIndex = (scan - pIndex); // pointer arithmetic is frowned upon, but it's the most efficient way of doing it here -- the alternative is to use more memory
+
+ // We do NOT search for smallest blocks first, as this code originally assumed.
+ // To prevent this from potentially overwriting a better match, the caller must determine
+ // the relative "goodness" of any existing match and this one, and avoid the call if it
+ // could be detrimental.
+ rFoundBlocks[fileOffset] = blockIndex;
+
+ // No point in searching further, report success
+ return true;
+ }
+
+ // Next
+ scan = scan->mpNextInHashList;
+ }
+
+ // Not matched
+ return false;
+}
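+
+// Illustrative sketch (added for clarity, not part of the original source):
+// the two-stage check above in miniature. The cheap rolling (weak) checksum
+// filters the hash chain first; the expensive MD5 (strong) digest is only
+// computed once, and only compared for entries that pass the weak test.
+// Guarded out of compilation; parameters are simplified.
+#if 0
+static bool ExampleTwoStageMatch(uint32_t WeakWanted, MD5Digest &rStrongWanted,
+	BlocksAvailableEntry *pChain)
+{
+	for(BlocksAvailableEntry *scan = pChain; scan != 0; scan = scan->mpNextInHashList)
+	{
+		if(scan->mWeakChecksum != WeakWanted)
+		{
+			continue;	// cheap reject, no MD5 comparison needed
+		}
+		if(rStrongWanted.DigestMatches(scan->mStrongChecksum))
+		{
+			return true;	// genuine match
+		}
+	}
+	return false;
+}
+#endif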
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    static GenerateRecipe(BackupStoreFileEncodeStream::Recipe &, BlocksAvailableEntry *, int64_t, std::map<int64_t, int64_t> &, int64_t)
+// Purpose: Fills in the recipe from the found block list
+// Created: 15/1/04
+//
+// --------------------------------------------------------------------------
+static void GenerateRecipe(BackupStoreFileEncodeStream::Recipe &rRecipe, BlocksAvailableEntry *pIndex,
+ int64_t NumBlocks, std::map<int64_t, int64_t> &rFoundBlocks, int64_t SizeOfInputFile)
+{
+	// NOTE: This function could be a lot more sophisticated. For example, if
+ // a small block overlaps a big block like this
+ // ****
+ // *******************************
+ // then the small block will be used, not the big one. But it'd be better to
+ // just ignore the small block and keep the big one. However, some stats should
+ // be gathered about real world files before writing complex code which might
+ // go wrong.
+
+ // Initialise a blank instruction
+ BackupStoreFileEncodeStream::RecipeInstruction instruction;
+ #define RESET_INSTRUCTION \
+ instruction.mSpaceBefore = 0; \
+ instruction.mBlocks = 0; \
+ instruction.mpStartBlock = 0;
+ RESET_INSTRUCTION
+
+ // First, a special case for when there are no found blocks
+ if(rFoundBlocks.size() == 0)
+ {
+ // No blocks, just a load of space
+ instruction.mSpaceBefore = SizeOfInputFile;
+ rRecipe.push_back(instruction);
+
+ #ifndef BOX_RELEASE_BUILD
+ if(BackupStoreFile::TraceDetailsOfDiffProcess)
+ {
+ BOX_TRACE("Diff: Default recipe generated, " <<
+ SizeOfInputFile << " bytes of file");
+ }
+ #endif
+
+ // Don't do anything
+ return;
+ }
+
+ // Current location
+ int64_t loc = 0;
+
+ // Then iterate through the list, generating the recipe
+ std::map<int64_t, int64_t>::const_iterator i(rFoundBlocks.begin());
+ ASSERT(i != rFoundBlocks.end()); // check logic
+
+ // Counting for debug tracing
+#ifndef BOX_RELEASE_BUILD
+ int64_t debug_NewBytesFound = 0;
+ int64_t debug_OldBlocksUsed = 0;
+#endif
+
+ for(; i != rFoundBlocks.end(); ++i)
+ {
+ // Remember... map is (position in file) -> (index of block in pIndex)
+
+ if(i->first < loc)
+ {
+ // This block overlaps the last one
+ continue;
+ }
+ else if(i->first > loc)
+ {
+ // There's a gap between the end of the last thing and this block.
+ // If there's an instruction waiting, push it onto the list
+ if(instruction.mSpaceBefore != 0 || instruction.mpStartBlock != 0)
+ {
+ rRecipe.push_back(instruction);
+ }
+ // Start a new instruction, with the gap ready
+ RESET_INSTRUCTION
+ instruction.mSpaceBefore = i->first - loc;
+ // Move location forward to match
+ loc += instruction.mSpaceBefore;
+#ifndef BOX_RELEASE_BUILD
+ debug_NewBytesFound += instruction.mSpaceBefore;
+#endif
+ }
+
+ // First, does the current instruction need pushing back, because this block is not
+ // sequential to the last one?
+ if(instruction.mpStartBlock != 0 && (pIndex + i->second) != (instruction.mpStartBlock + instruction.mBlocks))
+ {
+ rRecipe.push_back(instruction);
+ RESET_INSTRUCTION
+ }
+
+ // Add in this block
+ if(instruction.mpStartBlock == 0)
+ {
+ // This block starts a new instruction
+ instruction.mpStartBlock = pIndex + i->second;
+ instruction.mBlocks = 1;
+ }
+ else
+ {
+ // It continues the previous section of blocks
+ instruction.mBlocks += 1;
+ }
+
+#ifndef BOX_RELEASE_BUILD
+ debug_OldBlocksUsed++;
+#endif
+
+ // Move location forward
+ loc += pIndex[i->second].mSize;
+ }
+
+ // Push the last instruction generated
+ rRecipe.push_back(instruction);
+
+ // Is there any space left at the end which needs sending?
+ if(loc != SizeOfInputFile)
+ {
+ RESET_INSTRUCTION
+ instruction.mSpaceBefore = SizeOfInputFile - loc;
+#ifndef BOX_RELEASE_BUILD
+ debug_NewBytesFound += instruction.mSpaceBefore;
+#endif
+ rRecipe.push_back(instruction);
+ }
+
+ // dump out the recipe
+#ifndef BOX_RELEASE_BUILD
+ BOX_TRACE("Diff: " <<
+ debug_NewBytesFound << " new bytes found, " <<
+ debug_OldBlocksUsed << " old blocks used");
+ if(BackupStoreFile::TraceDetailsOfDiffProcess)
+ {
+		BOX_TRACE("Diff: Recipe generated (size " << rRecipe.size() << ")");
+ BOX_TRACE("======== ========= ========");
+ BOX_TRACE("Space b4 FirstBlk NumBlks");
+ {
+ for(unsigned int e = 0; e < rRecipe.size(); ++e)
+ {
+ char b[64];
+#ifdef WIN32
+ sprintf(b, "%8I64d", (int64_t)(rRecipe[e].mpStartBlock - pIndex));
+#else
+ sprintf(b, "%8lld", (int64_t)(rRecipe[e].mpStartBlock - pIndex));
+#endif
+ BOX_TRACE(std::setw(8) <<
+ rRecipe[e].mSpaceBefore <<
+ " " <<
+ ((rRecipe[e].mpStartBlock == 0)?" -":b) <<
+ " " << std::setw(8) <<
+ rRecipe[e].mBlocks);
+ }
+ }
+ BOX_TRACE("======== ========= ========");
+ }
+#endif
+}
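+
+// Worked example (added for clarity, not part of the original source): for a
+// file where bytes 0-1023 are new, the next 8192 bytes match two consecutive
+// old blocks starting at pIndex[4], and the final 512 bytes are new, the
+// recipe generated above would be (sizes are illustrative):
+//     { mSpaceBefore = 1024, mpStartBlock = &pIndex[4], mBlocks = 2 }
+//     { mSpaceBefore = 512,  mpStartBlock = 0,          mBlocks = 0 }
+// i.e. each instruction means "send this much new data, then reuse this run
+// of consecutive blocks from the old file".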
diff --git a/lib/backupstore/BackupStoreFileEncodeStream.cpp b/lib/backupstore/BackupStoreFileEncodeStream.cpp
new file mode 100644
index 00000000..b53c4c26
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileEncodeStream.cpp
@@ -0,0 +1,717 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileEncodeStream.cpp
+// Purpose: Implement stream-based file encoding for the backup store
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <string.h>
+
+#include "BackupClientFileAttributes.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreException.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreFileCryptVar.h"
+#include "BackupStoreFileEncodeStream.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreObjectMagic.h"
+#include "BoxTime.h"
+#include "FileStream.h"
+#include "Random.h"
+#include "RollingChecksum.h"
+
+#include "MemLeakFindOn.h"
+
+#include <cstring>
+
+using namespace BackupStoreFileCryptVar;
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::BackupStoreFileEncodeStream
+// Purpose: Constructor (opens file)
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+BackupStoreFileEncodeStream::BackupStoreFileEncodeStream()
+ : mpRecipe(0),
+ mpFile(0),
+ mpLogging(0),
+ mpRunStatusProvider(NULL),
+ mStatus(Status_Header),
+ mSendData(true),
+ mTotalBlocks(0),
+ mAbsoluteBlockNumber(-1),
+ mInstructionNumber(-1),
+ mNumBlocks(0),
+ mCurrentBlock(-1),
+ mCurrentBlockEncodedSize(0),
+ mPositionInCurrentBlock(0),
+ mBlockSize(BACKUP_FILE_MIN_BLOCK_SIZE),
+ mLastBlockSize(0),
+ mTotalBytesSent(0),
+ mpRawBuffer(0),
+ mAllocatedBufferSize(0),
+ mEntryIVBase(0)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::~BackupStoreFileEncodeStream()
+// Purpose: Destructor
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+BackupStoreFileEncodeStream::~BackupStoreFileEncodeStream()
+{
+ // Free buffers
+ if(mpRawBuffer)
+ {
+ ::free(mpRawBuffer);
+ mpRawBuffer = 0;
+ }
+
+ // Close the file, which we might have open
+ if(mpFile)
+ {
+ delete mpFile;
+ mpFile = 0;
+ }
+
+ // Clear up logging stream
+ if(mpLogging)
+ {
+ delete mpLogging;
+ mpLogging = 0;
+ }
+
+ // Free the recipe
+ if(mpRecipe != 0)
+ {
+ delete mpRecipe;
+ mpRecipe = 0;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFileEncodeStream::Setup(const std::string &, Recipe *, int64_t, const BackupStoreFilename &, int64_t *, ReadLoggingStream::Logger *, RunStatusProvider *)
+//		Purpose: Reads file information, and builds the file header ready for sending.
+//		         Takes ownership of the Recipe.
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFileEncodeStream::Setup(const std::string& Filename,
+ BackupStoreFileEncodeStream::Recipe *pRecipe,
+ int64_t ContainerID, const BackupStoreFilename &rStoreFilename,
+ int64_t *pModificationTime, ReadLoggingStream::Logger* pLogger,
+ RunStatusProvider* pRunStatusProvider)
+{
+ // Pointer to a blank recipe which we might create
+ BackupStoreFileEncodeStream::Recipe *pblankRecipe = 0;
+
+ try
+ {
+ // Get file attributes
+ box_time_t modTime = 0;
+ int64_t fileSize = 0;
+ BackupClientFileAttributes attr;
+ attr.ReadAttributes(Filename, false /* no zeroing of modification times */, &modTime,
+ 0 /* not interested in attr mod time */, &fileSize);
+
+ // Might need to create a blank recipe...
+ if(pRecipe == 0)
+ {
+ pblankRecipe = new BackupStoreFileEncodeStream::Recipe(0, 0);
+
+ BackupStoreFileEncodeStream::RecipeInstruction instruction;
+ instruction.mSpaceBefore = fileSize; // whole file
+ instruction.mBlocks = 0; // no blocks
+ instruction.mpStartBlock = 0; // no block
+ pblankRecipe->push_back(instruction);
+
+ pRecipe = pblankRecipe;
+ }
+
+ // Tell caller?
+			BlocksAvailableEntry *pindexKeptRef = pindex; // we need this later, but must set pindex to 0 now, in case of exceptions
+ {
+ *pModificationTime = modTime;
+ }
+
+ // Go through each instruction in the recipe and work out how many blocks
+ // it will add, and the max clear size of these blocks
+ int maxBlockClearSize = 0;
+ for(uint64_t inst = 0; inst < pRecipe->size(); ++inst)
+ {
+ if((*pRecipe)[inst].mSpaceBefore > 0)
+ {
+ // Calculate the number of blocks the space before requires
+ int64_t numBlocks;
+ int32_t blockSize, lastBlockSize;
+ CalculateBlockSizes((*pRecipe)[inst].mSpaceBefore, numBlocks, blockSize, lastBlockSize);
+				// Add to accumulated total
+ mTotalBlocks += numBlocks;
+ // Update maximum clear size
+ if(blockSize > maxBlockClearSize) maxBlockClearSize = blockSize;
+ if(lastBlockSize > maxBlockClearSize) maxBlockClearSize = lastBlockSize;
+ }
+
+ // Add number of blocks copied from the previous file
+ mTotalBlocks += (*pRecipe)[inst].mBlocks;
+
+ // Check for bad things
+ if((*pRecipe)[inst].mBlocks < 0 || ((*pRecipe)[inst].mBlocks != 0 && (*pRecipe)[inst].mpStartBlock == 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ // Run through blocks to get the max clear size
+ for(int32_t b = 0; b < (*pRecipe)[inst].mBlocks; ++b)
+ {
+ if((*pRecipe)[inst].mpStartBlock[b].mSize > maxBlockClearSize) maxBlockClearSize = (*pRecipe)[inst].mpStartBlock[b].mSize;
+ }
+ }
+
+ // Send data? (symlinks don't have any data in them)
+ mSendData = !attr.IsSymLink();
+
+		// If no data is being sent, then the max clear block size is zero
+ if(!mSendData)
+ {
+ maxBlockClearSize = 0;
+ }
+
+ // Header
+ file_StreamFormat hdr;
+ hdr.mMagicValue = htonl(OBJECTMAGIC_FILE_MAGIC_VALUE_V1);
+ hdr.mNumBlocks = (mSendData)?(box_hton64(mTotalBlocks)):(0);
+ hdr.mContainerID = box_hton64(ContainerID);
+ hdr.mModificationTime = box_hton64(modTime);
+ // add a bit to make it harder to tell what's going on -- try not to give away too much info about file size
+ hdr.mMaxBlockClearSize = htonl(maxBlockClearSize + 128);
+ hdr.mOptions = 0; // no options defined yet
+
+ // Write header to stream
+ mData.Write(&hdr, sizeof(hdr));
+
+ // Write filename to stream
+ rStoreFilename.WriteToStream(mData);
+
+ // Write attributes to stream
+ attr.WriteToStream(mData);
+
+ // Allocate some buffers for writing data
+ if(mSendData)
+ {
+ // Open the file
+ mpFile = new FileStream(Filename);
+
+ if (pLogger)
+ {
+ // Create logging stream
+ mpLogging = new ReadLoggingStream(*mpFile,
+ *pLogger);
+ }
+ else
+ {
+ // re-use FileStream instead
+ mpLogging = mpFile;
+ mpFile = NULL;
+ }
+
+ // Work out the largest possible block required for the encoded data
+ mAllocatedBufferSize = BackupStoreFile::MaxBlockSizeForChunkSize(maxBlockClearSize);
+
+			// Then allocate a raw buffer of this size, and an encoded buffer (which can be reallocated later if needed)
+ mpRawBuffer = (uint8_t*)::malloc(mAllocatedBufferSize);
+ if(mpRawBuffer == 0)
+ {
+ throw std::bad_alloc();
+ }
+#ifndef BOX_RELEASE_BUILD
+ // In debug builds, make sure that the reallocation code is exercised.
+ mEncodedBuffer.Allocate(mAllocatedBufferSize / 4);
+#else
+ mEncodedBuffer.Allocate(mAllocatedBufferSize);
+#endif
+ }
+ else
+ {
+ // Write an empty block index for the symlink
+ file_BlockIndexHeader blkhdr;
+ blkhdr.mMagicValue = htonl(OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1);
+ blkhdr.mOtherFileID = box_hton64(0); // not other file ID
+ blkhdr.mEntryIVBase = box_hton64(0);
+ blkhdr.mNumBlocks = box_hton64(0);
+ mData.Write(&blkhdr, sizeof(blkhdr));
+ }
+
+ // Ready for reading
+ mData.SetForReading();
+
+ // Update stats
+ BackupStoreFile::msStats.mBytesInEncodedFiles += fileSize;
+
+ // Finally, store the pointer to the recipe, when we know exceptions won't occur
+ mpRecipe = pRecipe;
+ }
+ catch(...)
+ {
+ // Clean up any blank recipe
+ if(pblankRecipe != 0)
+ {
+ delete pblankRecipe;
+ pblankRecipe = 0;
+ }
+ throw;
+ }
+
+ mpRunStatusProvider = pRunStatusProvider;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFileEncodeStream::CalculateBlockSizes(int64_t, int64_t &, int32_t &, int32_t &)
+// Purpose: Calculates the sizes of blocks in a section of the file
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFileEncodeStream::CalculateBlockSizes(int64_t DataSize, int64_t &rNumBlocksOut, int32_t &rBlockSizeOut, int32_t &rLastBlockSizeOut)
+{
+ // How many blocks, and how big?
+ rBlockSizeOut = BACKUP_FILE_MIN_BLOCK_SIZE / 2;
+ do
+ {
+ rBlockSizeOut *= 2;
+
+ rNumBlocksOut = (DataSize + rBlockSizeOut - 1) / rBlockSizeOut;
+
+ } while(rBlockSizeOut < BACKUP_FILE_MAX_BLOCK_SIZE && rNumBlocksOut > BACKUP_FILE_INCREASE_BLOCK_SIZE_AFTER);
+
+ // Last block size
+ rLastBlockSizeOut = DataSize - ((rNumBlocksOut - 1) * rBlockSizeOut);
+
+ // Avoid small blocks?
+ if(rLastBlockSizeOut < BACKUP_FILE_AVOID_BLOCKS_LESS_THAN
+ && rNumBlocksOut > 1)
+ {
+ // Add the small bit of data to the last block
+ --rNumBlocksOut;
+ rLastBlockSizeOut += rBlockSizeOut;
+ }
+
+ // checks!
+ ASSERT((((rNumBlocksOut-1) * rBlockSizeOut) + rLastBlockSizeOut) == DataSize);
+ //TRACE4("CalcBlockSize, sz %lld, num %lld, blocksize %d, last %d\n", DataSize, rNumBlocksOut, (int32_t)rBlockSizeOut, (int32_t)rLastBlockSizeOut);
+}
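+
+// Worked example (added for clarity, not part of the original source),
+// assuming BACKUP_FILE_MIN_BLOCK_SIZE = 4096 and
+// BACKUP_FILE_INCREASE_BLOCK_SIZE_AFTER = 4096 blocks (illustrative values --
+// the real ones live in BackupStoreConstants.h): a 10MB section fits in 2560
+// blocks of 4096 bytes, which is under the limit, so the block size stays at
+// 4096 and the last block is also 4096 bytes. A 100MB section would need
+// 25600 such blocks, so the size doubles through 8192 and 16384 to 32768
+// bytes, giving 3200 blocks.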
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::Read(void *, int, int)
+// Purpose: As interface -- generates encoded file data on the fly from the raw file
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+int BackupStoreFileEncodeStream::Read(void *pBuffer, int NBytes, int Timeout)
+{
+ // Check there's something to do.
+ if(mStatus == Status_Finished)
+ {
+ return 0;
+ }
+
+ if(mpRunStatusProvider && mpRunStatusProvider->StopRun())
+ {
+ THROW_EXCEPTION(BackupStoreException, SignalReceived);
+ }
+
+ int bytesToRead = NBytes;
+ uint8_t *buffer = (uint8_t*)pBuffer;
+
+ while(bytesToRead > 0 && mStatus != Status_Finished)
+ {
+ if(mStatus == Status_Header || mStatus == Status_BlockListing)
+ {
+ // Header or block listing phase -- send from the buffered stream
+
+ // Send bytes from the data buffer
+ int b = mData.Read(buffer, bytesToRead, Timeout);
+ bytesToRead -= b;
+ buffer += b;
+
+ // Check to see if all the data has been used from this stream
+ if(!mData.StreamDataLeft())
+ {
+ // Yes, move on to next phase (or finish, if there's no file data)
+ if(!mSendData)
+ {
+ mStatus = Status_Finished;
+ }
+ else
+ {
+ // Reset the buffer so it can be used for the next phase
+ mData.Reset();
+
+ // Get buffer ready for index?
+ if(mStatus == Status_Header)
+ {
+ // Just finished doing the stream header, create the block index header
+ file_BlockIndexHeader blkhdr;
+ blkhdr.mMagicValue = htonl(OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1);
+ ASSERT(mpRecipe != 0);
+ blkhdr.mOtherFileID = box_hton64(mpRecipe->GetOtherFileID());
+ blkhdr.mNumBlocks = box_hton64(mTotalBlocks);
+
+ // Generate the IV base
+ Random::Generate(&mEntryIVBase, sizeof(mEntryIVBase));
+ blkhdr.mEntryIVBase = box_hton64(mEntryIVBase);
+
+ mData.Write(&blkhdr, sizeof(blkhdr));
+ }
+
+ ++mStatus;
+ }
+ }
+ }
+ else if(mStatus == Status_Blocks)
+ {
+ // Block sending phase
+
+ if(mPositionInCurrentBlock >= mCurrentBlockEncodedSize)
+ {
+ // Next block!
+ ++mCurrentBlock;
+ ++mAbsoluteBlockNumber;
+ if(mCurrentBlock >= mNumBlocks)
+ {
+ // Output extra blocks for this instruction and move forward in file
+ if(mInstructionNumber >= 0)
+ {
+ SkipPreviousBlocksInInstruction();
+ }
+
+ // Is there another instruction to go?
+ ++mInstructionNumber;
+
+ // Skip instructions which don't contain any data
+ while(mInstructionNumber < static_cast<int64_t>(mpRecipe->size())
+ && (*mpRecipe)[mInstructionNumber].mSpaceBefore == 0)
+ {
+ SkipPreviousBlocksInInstruction();
+ ++mInstructionNumber;
+ }
+
+ if(mInstructionNumber >= static_cast<int64_t>(mpRecipe->size()))
+ {
+ // End of blocks, go to next phase
+ ++mStatus;
+
+ // Set the data to reading so the index can be written
+ mData.SetForReading();
+ }
+ else
+ {
+ // Get ready for this instruction
+ SetForInstruction();
+ }
+ }
+
+ // Can't use 'else' here as SetForInstruction() will change this
+ if(mCurrentBlock < mNumBlocks)
+ {
+ EncodeCurrentBlock();
+ }
+ }
+
+ // Send data from the current block (if there's data to send)
+ if(mPositionInCurrentBlock < mCurrentBlockEncodedSize)
+ {
+ // How much data to put in the buffer?
+ int s = mCurrentBlockEncodedSize - mPositionInCurrentBlock;
+ if(s > bytesToRead) s = bytesToRead;
+
+ // Copy it in
+ ::memcpy(buffer, mEncodedBuffer.mpBuffer + mPositionInCurrentBlock, s);
+
+ // Update variables
+ bytesToRead -= s;
+ buffer += s;
+ mPositionInCurrentBlock += s;
+ }
+ }
+ else
+ {
+ // Should never get here, as it'd be an invalid status
+ ASSERT(false);
+ }
+ }
+
+ // Add encoded size to stats
+ BackupStoreFile::msStats.mTotalFileStreamSize += (NBytes - bytesToRead);
+ mTotalBytesSent += (NBytes - bytesToRead);
+
+ // Return size of data to caller
+ return NBytes - bytesToRead;
+}
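+
+// Illustrative note (added for clarity, not part of the original source):
+// Read() produces the stream in phases driven by mStatus --
+//     Status_Header        file header, filename and attributes (from mData)
+//     Status_Blocks        each block encoded on the fly from the raw file
+//     Status_BlockListing  the encrypted block index accumulated in mData
+//     Status_Finished      nothing left to send
+// The ++mStatus steps above assume the enum values (declared in
+// BackupStoreFileEncodeStream.h, not shown here) are in exactly this order.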
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupStoreFileEncodeStream::SkipPreviousBlocksInInstruction()
+// Purpose: Private. Stores the blocks of the old file referenced in the current
+// instruction into the index and skips over the data in the file
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFileEncodeStream::SkipPreviousBlocksInInstruction()
+{
+ // Check something is necessary
+ if((*mpRecipe)[mInstructionNumber].mpStartBlock == 0 || (*mpRecipe)[mInstructionNumber].mBlocks == 0)
+ {
+ return;
+ }
+
+ // Index of the first block in old file (being diffed from)
+ int firstIndex = mpRecipe->BlockPtrToIndex((*mpRecipe)[mInstructionNumber].mpStartBlock);
+
+ int64_t sizeToSkip = 0;
+
+ for(int32_t b = 0; b < (*mpRecipe)[mInstructionNumber].mBlocks; ++b)
+ {
+ // Update stats
+ BackupStoreFile::msStats.mBytesAlreadyOnServer += (*mpRecipe)[mInstructionNumber].mpStartBlock[b].mSize;
+
+ // Store the entry
+ StoreBlockIndexEntry(0 - (firstIndex + b),
+ (*mpRecipe)[mInstructionNumber].mpStartBlock[b].mSize,
+ (*mpRecipe)[mInstructionNumber].mpStartBlock[b].mWeakChecksum,
+ (*mpRecipe)[mInstructionNumber].mpStartBlock[b].mStrongChecksum);
+
+		// Increment the absolute block number -- keeps the encryption IV in sync
+ ++mAbsoluteBlockNumber;
+
+ // Add the size of this block to the size to skip
+ sizeToSkip += (*mpRecipe)[mInstructionNumber].mpStartBlock[b].mSize;
+ }
+
+ // Move forward in the stream
+ mpLogging->Seek(sizeToSkip, IOStream::SeekType_Relative);
+}
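+
+// Illustrative note (added for clarity, not part of the original source):
+// this is where the negative block references in the index come from. If an
+// instruction reuses old blocks 4 and 5, the entries stored above get encoded
+// sizes of -4 and -5; the combine code earlier in this change (CopyData() /
+// WriteNewIndex()) negates those values again to find the referenced block in
+// the old file's index.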
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::SetForInstruction()
+// Purpose: Private. Sets the state of the internal variables for the current instruction in the recipe
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFileEncodeStream::SetForInstruction()
+{
+ // Calculate block sizes
+ CalculateBlockSizes((*mpRecipe)[mInstructionNumber].mSpaceBefore, mNumBlocks, mBlockSize, mLastBlockSize);
+
+ // Set variables
+ mCurrentBlock = 0;
+ mCurrentBlockEncodedSize = 0;
+ mPositionInCurrentBlock = 0;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::EncodeCurrentBlock()
+// Purpose: Private. Encodes the current block, and writes the block data to the index
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFileEncodeStream::EncodeCurrentBlock()
+{
+ // How big is the block, raw?
+ int blockRawSize = mBlockSize;
+ if(mCurrentBlock == (mNumBlocks - 1))
+ {
+ blockRawSize = mLastBlockSize;
+ }
+ ASSERT(blockRawSize < mAllocatedBufferSize);
+
+ // Check file open
+ if(mpLogging == 0)
+ {
+ // File should be open, but isn't. So logical error.
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ // Read the data in
+ if(!mpLogging->ReadFullBuffer(mpRawBuffer, blockRawSize,
+ 0 /* not interested in size if failure */))
+ {
+ // TODO: Do something more intelligent, and abort
+ // this upload because the file has changed.
+ THROW_EXCEPTION(BackupStoreException,
+ Temp_FileEncodeStreamDidntReadBuffer)
+ }
+
+ // Encode it
+ mCurrentBlockEncodedSize = BackupStoreFile::EncodeChunk(mpRawBuffer,
+ blockRawSize, mEncodedBuffer);
+
+ //TRACE2("Encode: Encoded size of block %d is %d\n", (int32_t)mCurrentBlock, (int32_t)mCurrentBlockEncodedSize);
+
+ // Create block listing data -- generate checksums
+ RollingChecksum weakChecksum(mpRawBuffer, blockRawSize);
+ MD5Digest strongChecksum;
+ strongChecksum.Add(mpRawBuffer, blockRawSize);
+ strongChecksum.Finish();
+
+ // Add entry to the index
+ StoreBlockIndexEntry(mCurrentBlockEncodedSize, blockRawSize,
+ weakChecksum.GetChecksum(), strongChecksum.DigestAsData());
+
+ // Set vars to reading this block
+ mPositionInCurrentBlock = 0;
+}
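// A short sketch of how the weak/strong checksum pair computed above is meant
// to be used on the diffing side: the cheap rolling checksum rejects most
// candidate blocks, and the MD5 digest confirms the survivors. The helper name
// is hypothetical; BlocksAvailableEntry is the structure declared in
// BackupStoreFileEncodeStream.h, and ::memcmp comes from <cstring>.
bool MatchesIndexEntry(const uint8_t *pData, int32_t Size,
	const BackupStoreFileCreation::BlocksAvailableEntry &rEntry)
{
	if(Size != rEntry.mSize)
	{
		return false;
	}

	// Weak test first -- cheap to compute and to roll along a file
	RollingChecksum weak(pData, Size);
	if(weak.GetChecksum() != rEntry.mWeakChecksum)
	{
		return false;
	}

	// Confirm with the strong checksum
	MD5Digest strong;
	strong.Add(pData, Size);
	strong.Finish();
	return ::memcmp(strong.DigestAsData(), rEntry.mStrongChecksum,
		MD5Digest::DigestLength) == 0;
}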
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::StoreBlockIndexEntry(int64_t, int32_t, uint32_t, uint8_t *)
+// Purpose: Private. Adds an entry to the index currently being stored for sending at end of the stream.
+// Created: 16/1/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFileEncodeStream::StoreBlockIndexEntry(int64_t EncSizeOrBlkIndex, int32_t ClearSize, uint32_t WeakChecksum, uint8_t *pStrongChecksum)
+{
+ // First, the encrypted section
+ file_BlockIndexEntryEnc entryEnc;
+ entryEnc.mSize = htonl(ClearSize);
+ entryEnc.mWeakChecksum = htonl(WeakChecksum);
+ ::memcpy(entryEnc.mStrongChecksum, pStrongChecksum, sizeof(entryEnc.mStrongChecksum));
+
+ // Then the clear section
+ file_BlockIndexEntry entry;
+ entry.mEncodedSize = box_hton64(((uint64_t)EncSizeOrBlkIndex));
+
+	// Then encrypt the encrypted section of the entry
+ // Generate the IV from the block number
+ if(sBlowfishEncryptBlockEntry.GetIVLength() != sizeof(mEntryIVBase))
+ {
+ THROW_EXCEPTION(BackupStoreException, IVLengthForEncodedBlockSizeDoesntMeetLengthRequirements)
+ }
+ uint64_t iv = mEntryIVBase;
+ iv += mAbsoluteBlockNumber;
+ // Convert to network byte order before encrypting with it, so that restores work on
+ // platforms with different endiannesses.
+ iv = box_hton64(iv);
+ sBlowfishEncryptBlockEntry.SetIV(&iv);
+
+ // Encode the data
+ int encodedSize = sBlowfishEncryptBlockEntry.TransformBlock(entry.mEnEnc, sizeof(entry.mEnEnc), &entryEnc, sizeof(entryEnc));
+ if(encodedSize != sizeof(entry.mEnEnc))
+ {
+ THROW_EXCEPTION(BackupStoreException, BlockEntryEncodingDidntGiveExpectedLength)
+ }
+
+ // Save to data block for sending at the end of the stream
+ mData.Write(&entry, sizeof(entry));
+}
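// A worked sketch of the IV scheme used just above: all block index entries
// share one Blowfish key, but each entry's IV is the per-file base value
// (mEntryIVBase, carried in the block index header on the wire) plus the
// absolute block number, converted to network byte order so that decoding
// reproduces the same IV regardless of platform endianness. The function name
// is illustrative only.
uint64_t BlockEntryIV(uint64_t EntryIVBase, int64_t AbsoluteBlockNumber)
{
	uint64_t iv = EntryIVBase + AbsoluteBlockNumber;
	return box_hton64(iv);	// pass a pointer to this value to SetIV()
}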
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::Write(const void *, int)
+//		Purpose: As interface. Always throws an exception -- encoded file streams cannot be written to.
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFileEncodeStream::Write(const void *pBuffer, int NBytes)
+{
+ THROW_EXCEPTION(BackupStoreException, CantWriteToEncodedFileStream)
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::StreamDataLeft()
+// Purpose: As interface -- end of stream reached?
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFileEncodeStream::StreamDataLeft()
+{
+ return (mStatus != Status_Finished);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::StreamClosed()
+// Purpose: As interface
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFileEncodeStream::StreamClosed()
+{
+ return true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::Recipe::Recipe(BackupStoreFileCreation::BlocksAvailableEntry *, int64_t)
+//		Purpose: Constructor. Takes ownership of the block index, and will free it when the recipe is destroyed
+// Created: 15/1/04
+//
+// --------------------------------------------------------------------------
+BackupStoreFileEncodeStream::Recipe::Recipe(BackupStoreFileCreation::BlocksAvailableEntry *pBlockIndex,
+ int64_t NumBlocksInIndex, int64_t OtherFileID)
+ : mpBlockIndex(pBlockIndex),
+ mNumBlocksInIndex(NumBlocksInIndex),
+ mOtherFileID(OtherFileID)
+{
+ ASSERT((mpBlockIndex == 0) || (NumBlocksInIndex != 0))
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFileEncodeStream::Recipe::~Recipe()
+// Purpose: Destructor
+// Created: 15/1/04
+//
+// --------------------------------------------------------------------------
+BackupStoreFileEncodeStream::Recipe::~Recipe()
+{
+ // Free the block index, if there is one
+ if(mpBlockIndex != 0)
+ {
+ ::free(mpBlockIndex);
+ }
+}
+
+
+
+
diff --git a/lib/backupstore/BackupStoreFileEncodeStream.h b/lib/backupstore/BackupStoreFileEncodeStream.h
new file mode 100644
index 00000000..a169e036
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileEncodeStream.h
@@ -0,0 +1,137 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileEncodeStream.h
+// Purpose: Implement stream-based file encoding for the backup store
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREFILEENCODESTREAM__H
+#define BACKUPSTOREFILEENCODESTREAM__H
+
+#include <vector>
+
+#include "IOStream.h"
+#include "BackupStoreFilename.h"
+#include "CollectInBufferStream.h"
+#include "MD5Digest.h"
+#include "BackupStoreFile.h"
+#include "ReadLoggingStream.h"
+#include "RunStatusProvider.h"
+
+namespace BackupStoreFileCreation
+{
+ // Diffing and creation of files share some implementation details.
+ typedef struct _BlocksAvailableEntry
+ {
+ struct _BlocksAvailableEntry *mpNextInHashList;
+ int32_t mSize; // size in clear
+ uint32_t mWeakChecksum; // weak, rolling checksum
+ uint8_t mStrongChecksum[MD5Digest::DigestLength]; // strong digest based checksum
+ } BlocksAvailableEntry;
+
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupStoreFileEncodeStream
+// Purpose: Encode a file into a stream
+// Created: 8/12/03
+//
+// --------------------------------------------------------------------------
+class BackupStoreFileEncodeStream : public IOStream
+{
+public:
+ BackupStoreFileEncodeStream();
+ ~BackupStoreFileEncodeStream();
+
+ typedef struct
+ {
+		int64_t mSpaceBefore;	// number of bytes of new file data to encode before the reused blocks below
+		int32_t mBlocks;	// number of blocks to reuse from the old file, starting at mpStartBlock
+ BackupStoreFileCreation::BlocksAvailableEntry *mpStartBlock; // may be null
+ } RecipeInstruction;
+
+ class Recipe : public std::vector<RecipeInstruction>
+ {
+ // NOTE: This class is rather tied in with the implementation of diffing.
+ public:
+ Recipe(BackupStoreFileCreation::BlocksAvailableEntry *pBlockIndex, int64_t NumBlocksInIndex,
+ int64_t OtherFileID = 0);
+ ~Recipe();
+
+ int64_t GetOtherFileID() {return mOtherFileID;}
+ int64_t BlockPtrToIndex(BackupStoreFileCreation::BlocksAvailableEntry *pBlock)
+ {
+ return pBlock - mpBlockIndex;
+ }
+
+ private:
+ BackupStoreFileCreation::BlocksAvailableEntry *mpBlockIndex;
+ int64_t mNumBlocksInIndex;
+ int64_t mOtherFileID;
+ };
+
+ void Setup(const std::string& Filename, Recipe *pRecipe, int64_t ContainerID,
+ const BackupStoreFilename &rStoreFilename,
+ int64_t *pModificationTime,
+ ReadLoggingStream::Logger* pLogger = NULL,
+ RunStatusProvider* pRunStatusProvider = NULL);
+
+ virtual int Read(void *pBuffer, int NBytes, int Timeout);
+ virtual void Write(const void *pBuffer, int NBytes);
+ virtual bool StreamDataLeft();
+ virtual bool StreamClosed();
+ int64_t GetTotalBytesSent() { return mTotalBytesSent; }
+
+private:
+ enum
+ {
+ Status_Header = 0,
+ Status_Blocks = 1,
+ Status_BlockListing = 2,
+ Status_Finished = 3
+ };
+
+private:
+ void EncodeCurrentBlock();
+ void CalculateBlockSizes(int64_t DataSize, int64_t &rNumBlocksOut, int32_t &rBlockSizeOut, int32_t &rLastBlockSizeOut);
+ void SkipPreviousBlocksInInstruction();
+ void SetForInstruction();
+	void StoreBlockIndexEntry(int64_t EncSizeOrBlkIndex, int32_t ClearSize, uint32_t WeakChecksum, uint8_t *pStrongChecksum);
+
+private:
+ Recipe *mpRecipe;
+ IOStream *mpFile; // source file
+ CollectInBufferStream mData; // buffer for header and index entries
+ IOStream *mpLogging;
+ RunStatusProvider* mpRunStatusProvider;
+ int mStatus;
+ bool mSendData; // true if there's file data to send (ie not a symlink)
+ int64_t mTotalBlocks; // Total number of blocks in the file
+ int64_t mAbsoluteBlockNumber; // The absolute block number currently being output
+ // Instruction number
+ int64_t mInstructionNumber;
+ // All the below are within the current instruction
+ int64_t mNumBlocks; // number of blocks. Last one will be a different size to the rest in most cases
+ int64_t mCurrentBlock;
+ int32_t mCurrentBlockEncodedSize;
+ int32_t mPositionInCurrentBlock; // for reading out
+ int32_t mBlockSize; // Basic block size of most of the blocks in the file
+ int32_t mLastBlockSize; // the size (unencoded) of the last block in the file
+ int64_t mTotalBytesSent;
+ // Buffers
+ uint8_t *mpRawBuffer; // buffer for raw data
+ BackupStoreFile::EncodingBuffer mEncodedBuffer;
+ // buffer for encoded data
+ int32_t mAllocatedBufferSize; // size of above two allocated blocks
+ uint64_t mEntryIVBase; // base for block entry IV
+};
+
+
+
+#endif // BACKUPSTOREFILEENCODESTREAM__H
+
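// A minimal sketch of building a Recipe for a from-scratch upload (no old file
// to diff against): one instruction that encodes the whole file as new data and
// reuses no blocks. The recipe would then be handed to Setup() along with the
// local filename and store filename declared above. The function name is
// hypothetical, and who eventually deletes the recipe (the stream or the
// caller) is not shown in this header, so treat ownership as an assumption.
BackupStoreFileEncodeStream::Recipe *MakeWholeFileRecipe(int64_t FileSizeInBytes)
{
	// No old-file block index: null pointer, zero blocks
	BackupStoreFileEncodeStream::Recipe *precipe =
		new BackupStoreFileEncodeStream::Recipe(0, 0);

	BackupStoreFileEncodeStream::RecipeInstruction instruction;
	instruction.mSpaceBefore = FileSizeInBytes;	// all of the file is new data
	instruction.mBlocks = 0;			// nothing reused from an old file
	instruction.mpStartBlock = 0;
	precipe->push_back(instruction);

	return precipe;
}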
diff --git a/lib/backupstore/BackupStoreFileRevDiff.cpp b/lib/backupstore/BackupStoreFileRevDiff.cpp
new file mode 100644
index 00000000..509eef61
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileRevDiff.cpp
@@ -0,0 +1,258 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileRevDiff.cpp
+// Purpose: Reverse a patch, to build a new patch from new to old files
+// Created: 12/7/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <new>
+#include <stdlib.h>
+
+#include "BackupStoreFile.h"
+#include "BackupStoreFileWire.h"
+#include "BackupStoreObjectMagic.h"
+#include "BackupStoreException.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreFilename.h"
+
+#include "MemLeakFindOn.h"
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFile::ReverseDiffFile(IOStream &, IOStream &, IOStream &, IOStream &, int64_t)
+// Purpose: Reverse a patch, to build a new patch from new to old files. Takes
+//			 two independent copies of the From file, for efficiency.
+// Created: 12/7/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFile::ReverseDiffFile(IOStream &rDiff, IOStream &rFrom, IOStream &rFrom2, IOStream &rOut, int64_t ObjectIDOfFrom, bool *pIsCompletelyDifferent)
+{
+ // Read and copy the header from the from file to the out file -- beginnings of the patch
+ file_StreamFormat hdr;
+ if(!rFrom.ReadFullBuffer(&hdr, sizeof(hdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ if(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+ // Copy
+ rOut.Write(&hdr, sizeof(hdr));
+ // Copy over filename and attributes
+ // BLOCK
+ {
+ BackupStoreFilename filename;
+ filename.ReadFromStream(rFrom, IOStream::TimeOutInfinite);
+ filename.WriteToStream(rOut);
+ StreamableMemBlock attr;
+ attr.ReadFromStream(rFrom, IOStream::TimeOutInfinite);
+ attr.WriteToStream(rOut);
+ }
+
+ // Build an index of common blocks.
+	// For each block in the from file, we want to know its index in the
+ // diff file. Allocate memory for this information.
+ int64_t fromNumBlocks = box_ntoh64(hdr.mNumBlocks);
+ int64_t *pfromIndexInfo = (int64_t*)::malloc(fromNumBlocks * sizeof(int64_t));
+ if(pfromIndexInfo == 0)
+ {
+ throw std::bad_alloc();
+ }
+
+ // Buffer data
+ void *buffer = 0;
+ int bufferSize = 0;
+
+ // flag
+ bool isCompletelyDifferent = true;
+
+ try
+ {
+ // Initialise the index to be all 0, ie not filled in yet
+ for(int64_t i = 0; i < fromNumBlocks; ++i)
+ {
+ pfromIndexInfo[i] = 0;
+ }
+
+ // Within the from file, skip to the index
+ MoveStreamPositionToBlockIndex(rDiff);
+
+ // Read in header of index
+ file_BlockIndexHeader diffIdxHdr;
+ if(!rDiff.ReadFullBuffer(&diffIdxHdr, sizeof(diffIdxHdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ if(ntohl(diffIdxHdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // And then read in each entry
+ int64_t diffNumBlocks = box_ntoh64(diffIdxHdr.mNumBlocks);
+ for(int64_t b = 0; b < diffNumBlocks; ++b)
+ {
+ file_BlockIndexEntry e;
+ if(!rDiff.ReadFullBuffer(&e, sizeof(e), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Where's the block?
+ int64_t blockEn = box_ntoh64(e.mEncodedSize);
+ if(blockEn > 0)
+ {
+				// Block is in the delta file; ignore it for now -- not relevant to rebuilding the from file
+ }
+ else
+ {
+ // Block is in the original file, store which block it is in this file
+ int64_t fromIndex = 0 - blockEn;
+ if(fromIndex < 0 || fromIndex >= fromNumBlocks)
+ {
+ THROW_EXCEPTION(BackupStoreException, IncompatibleFromAndDiffFiles)
+ }
+
+ // Store information about where it is in the new file
+				// NOTE: This is slightly different from how it'll be stored in the final index.
+ pfromIndexInfo[fromIndex] = -1 - b;
+ }
+ }
+
+ // Open the index for the second copy of the from file
+ MoveStreamPositionToBlockIndex(rFrom2);
+
+ // Read in header of index
+ file_BlockIndexHeader fromIdxHdr;
+ if(!rFrom2.ReadFullBuffer(&fromIdxHdr, sizeof(fromIdxHdr), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ if(ntohl(fromIdxHdr.mMagicValue) != OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1
+ || box_ntoh64(fromIdxHdr.mOtherFileID) != 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // So, we can now start building the data in the file
+ int64_t filePosition = rFrom.GetPosition();
+ for(int64_t b = 0; b < fromNumBlocks; ++b)
+ {
+ // Read entry from from index
+ file_BlockIndexEntry e;
+ if(!rFrom2.ReadFullBuffer(&e, sizeof(e), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Get size
+			int64_t blockSize = box_ntoh64(e.mEncodedSize);	// convert from network byte order
+ if(blockSize < 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadBackupStoreFile)
+ }
+
+ // Copy this block?
+ if(pfromIndexInfo[b] == 0)
+ {
+ // Copy it, first move to file location
+ rFrom.Seek(filePosition, IOStream::SeekType_Absolute);
+
+ // Make sure there's memory available to copy this
+ if(bufferSize < blockSize || buffer == 0)
+ {
+ // Free old block
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ buffer = 0;
+ bufferSize = 0;
+ }
+ // Allocate new block
+ buffer = ::malloc(blockSize);
+ if(buffer == 0)
+ {
+ throw std::bad_alloc();
+ }
+ bufferSize = blockSize;
+ }
+ ASSERT(bufferSize >= blockSize);
+
+ // Copy the block
+ if(!rFrom.ReadFullBuffer(buffer, blockSize, 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, FailedToReadBlockOnCombine)
+ }
+ rOut.Write(buffer, blockSize);
+
+ // Store the size
+ pfromIndexInfo[b] = blockSize;
+ }
+ else
+ {
+ // Block isn't needed, so it's not completely different
+ isCompletelyDifferent = false;
+ }
+ filePosition += blockSize;
+ }
+
+ // Then write the index, modified header first
+ fromIdxHdr.mOtherFileID = isCompletelyDifferent?0:(box_hton64(ObjectIDOfFrom));
+ rOut.Write(&fromIdxHdr, sizeof(fromIdxHdr));
+
+ // Move to start of index entries
+ rFrom.Seek(filePosition + sizeof(file_BlockIndexHeader), IOStream::SeekType_Absolute);
+
+ // Then copy modified entries
+ for(int64_t b = 0; b < fromNumBlocks; ++b)
+ {
+ // Read entry from from index
+ file_BlockIndexEntry e;
+ if(!rFrom.ReadFullBuffer(&e, sizeof(e), 0))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // Modify...
+ int64_t s = pfromIndexInfo[b];
+ // Adjust to reflect real block index (remember 0 has a different meaning here)
+ if(s < 0) ++s;
+ // Insert
+ e.mEncodedSize = box_hton64(s);
+ // Write
+ rOut.Write(&e, sizeof(e));
+ }
+ }
+ catch(...)
+ {
+ ::free(pfromIndexInfo);
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ }
+ throw;
+ }
+
+ // Free memory used (oh for finally {} blocks)
+ ::free(pfromIndexInfo);
+ if(buffer != 0)
+ {
+ ::free(buffer);
+ }
+
+ // return completely different flag
+ if(pIsCompletelyDifferent != 0)
+ {
+ *pIsCompletelyDifferent = isCompletelyDifferent;
+ }
+}
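// A worked sketch (hypothetical numbers) of the pfromIndexInfo bookkeeping
// above. Suppose the from file has four blocks, and the diff's block 2 is a
// reference to from-block 3 (so that diff index entry holds -3), while
// from-blocks 0, 1 and 2 are not referenced by the diff at all.
void ReverseIndexExample()
{
	// After scanning the diff's index: 0 means "not referenced",
	// -1 - b means "available as diff block b"
	int64_t fromIndexInfo[4] = {0, 0, 0, -1 - 2};

	// After the copy loop, each unreferenced entry holds the encoded size
	// of the block data just written into the reverse patch
	fromIndexInfo[0] = 900;		// sizes are made up for illustration
	fromIndexInfo[1] = 1020;
	fromIndexInfo[2] = 870;

	// When the index is rewritten, negative values are incremented by one,
	// so -3 becomes -2, i.e. "block 2 of the other (new) file" under the
	// file_BlockIndexEntry convention: size if > 0, 0 - blockIndex if <= 0
	int64_t written[4];
	for(int b = 0; b < 4; ++b)
	{
		int64_t s = fromIndexInfo[b];
		if(s < 0) ++s;
		written[b] = s;
	}
	ASSERT(written[3] == -2);
	(void)written;
}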
+
+
+
diff --git a/lib/backupstore/BackupStoreFileWire.h b/lib/backupstore/BackupStoreFileWire.h
new file mode 100644
index 00000000..49e94aa5
--- /dev/null
+++ b/lib/backupstore/BackupStoreFileWire.h
@@ -0,0 +1,74 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFileWire.h
+// Purpose: On the wire / disc formats for backup store files
+// Created: 12/1/04
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREFILEWIRE__H
+#define BACKUPSTOREFILEWIRE__H
+
+#include "MD5Digest.h"
+
+// set packing to one byte
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "BeginStructPackForWire.h"
+#else
+BEGIN_STRUCTURE_PACKING_FOR_WIRE
+#endif
+
+typedef struct
+{
+ int32_t mMagicValue; // also the version number
+ int64_t mNumBlocks; // number of blocks contained in the file
+ int64_t mContainerID;
+ int64_t mModificationTime;
+ int32_t mMaxBlockClearSize; // Maximum clear size that can be expected for a block
+ int32_t mOptions; // bitmask of options used
+ // Then a BackupStoreFilename
+ // Then a BackupClientFileAttributes
+} file_StreamFormat;
+
+typedef struct
+{
+ int32_t mMagicValue; // different magic value
+ int64_t mOtherFileID; // the file ID of the 'other' file which may be referenced by the index
+ uint64_t mEntryIVBase; // base value for block IV
+ int64_t mNumBlocks; // repeat of value in file header
+} file_BlockIndexHeader;
+
+typedef struct
+{
+ int32_t mSize; // size in clear
+ uint32_t mWeakChecksum; // weak, rolling checksum
+ uint8_t mStrongChecksum[MD5Digest::DigestLength]; // strong digest based checksum
+} file_BlockIndexEntryEnc;
+
+typedef struct
+{
+ union
+ {
+ int64_t mEncodedSize; // size encoded, if > 0
+ int64_t mOtherBlockIndex; // 0 - block number in other file, if <= 0
+ };
+ uint8_t mEnEnc[sizeof(file_BlockIndexEntryEnc)]; // Encoded section
+} file_BlockIndexEntry;
+
+// Use default packing
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "EndStructPackForWire.h"
+#else
+END_STRUCTURE_PACKING_FOR_WIRE
+#endif
+
+// header for blocks of compressed data in files
+#define HEADER_CHUNK_IS_COMPRESSED 1 // bit
+#define HEADER_ENCODING_SHIFT 1 // shift value
+#define HEADER_BLOWFISH_ENCODING 1 // value stored in bits 1 -- 7
+#define HEADER_AES_ENCODING 2 // value stored in bits 1 -- 7
+
+
+#endif // BACKUPSTOREFILEWIRE__H
+
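// A quick decode sketch for the per-chunk header byte defined above: bit 0
// records whether the chunk data is compressed, and the remaining bits (after
// HEADER_ENCODING_SHIFT) record which cipher was used. The function name and
// output parameters are illustrative only.
void DecodeChunkHeader(uint8_t HeaderByte, bool &rIsCompressedOut, int &rEncodingOut)
{
	rIsCompressedOut = (HeaderByte & HEADER_CHUNK_IS_COMPRESSED) != 0;

	// Yields HEADER_BLOWFISH_ENCODING (1) or HEADER_AES_ENCODING (2)
	rEncodingOut = HeaderByte >> HEADER_ENCODING_SHIFT;
}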
diff --git a/lib/backupstore/BackupStoreFilename.cpp b/lib/backupstore/BackupStoreFilename.cpp
new file mode 100644
index 00000000..72cd1acd
--- /dev/null
+++ b/lib/backupstore/BackupStoreFilename.cpp
@@ -0,0 +1,281 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFilename.cpp
+// Purpose: Filename for the backup store
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+#include "BackupStoreFilename.h"
+#include "Protocol.h"
+#include "BackupStoreException.h"
+#include "IOStream.h"
+#include "Guards.h"
+
+#include "MemLeakFindOn.h"
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::BackupStoreFilename()
+// Purpose: Default constructor -- creates an invalid filename
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilename::BackupStoreFilename()
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::BackupStoreFilename(const BackupStoreFilename &)
+// Purpose: Copy constructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilename::BackupStoreFilename(const BackupStoreFilename &rToCopy)
+ : mEncryptedName(rToCopy.mEncryptedName)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::~BackupStoreFilename()
+// Purpose: Destructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilename::~BackupStoreFilename()
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::CheckValid(bool)
+// Purpose: Checks the encoded filename for validity
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFilename::CheckValid(bool ExceptionIfInvalid) const
+{
+ bool ok = true;
+
+ if(mEncryptedName.size() < 2)
+ {
+ // Isn't long enough to have a header
+ ok = false;
+ }
+ else
+ {
+ // Check size is consistent
+ unsigned int dsize = BACKUPSTOREFILENAME_GET_SIZE(this->mEncryptedName);
+ if(dsize != mEncryptedName.size())
+ {
+ ok = false;
+ }
+
+ // And encoding is an accepted value
+ unsigned int encoding = BACKUPSTOREFILENAME_GET_ENCODING(this->mEncryptedName);
+ if(encoding < Encoding_Min || encoding > Encoding_Max)
+ {
+ ok = false;
+ }
+ }
+
+ // Exception?
+ if(!ok && ExceptionIfInvalid)
+ {
+ THROW_EXCEPTION(BackupStoreException, InvalidBackupStoreFilename)
+ }
+
+ return ok;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::ReadFromProtocol(Protocol &)
+// Purpose: Reads the filename from the protocol object
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilename::ReadFromProtocol(Protocol &rProtocol)
+{
+ // Read the header
+ char hdr[2];
+ rProtocol.Read(hdr, 2);
+
+ // How big is it?
+ int dsize = BACKUPSTOREFILENAME_GET_SIZE(hdr);
+
+ // Fetch rest of data, relying on the Protocol to error on stupidly large sizes for us
+ std::string data;
+ rProtocol.Read(data, dsize - 2);
+
+ // assign to this string, storing the header and the extra data
+ mEncryptedName.assign(hdr, 2);
+ mEncryptedName.append(data.c_str(), data.size());
+
+ // Check it
+ CheckValid();
+
+ // Alert derived classes
+ EncodedFilenameChanged();
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::WriteToProtocol(Protocol &)
+// Purpose: Writes the filename to the protocol object
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilename::WriteToProtocol(Protocol &rProtocol) const
+{
+ CheckValid();
+
+ rProtocol.Write(mEncryptedName.c_str(), mEncryptedName.size());
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::ReadFromStream(IOStream &)
+// Purpose: Reads the filename from a stream
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilename::ReadFromStream(IOStream &rStream, int Timeout)
+{
+ // Read the header
+ char hdr[2];
+ if(!rStream.ReadFullBuffer(hdr, 2, 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+
+ // How big is it?
+ unsigned int dsize = BACKUPSTOREFILENAME_GET_SIZE(hdr);
+
+ // Assume most filenames are small
+ char buf[256];
+ if(dsize < sizeof(buf))
+ {
+		// Fetch the rest of the data (the 14-bit size field in the header limits dsize to 16383)
+ if(!rStream.ReadFullBuffer(buf + 2, dsize - 2, 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ // Copy in header
+ buf[0] = hdr[0]; buf[1] = hdr[1];
+
+ // assign to this string, storing the header and the extra data
+ mEncryptedName.assign(buf, dsize);
+ }
+ else
+ {
+ // Block of memory to hold it
+ MemoryBlockGuard<char*> dataB(dsize+2);
+ char *data = dataB;
+
+		// Fetch the rest of the data into the allocated block
+ if(!rStream.ReadFullBuffer(data + 2, dsize - 2, 0 /* not interested in bytes read if this fails */, Timeout))
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldntReadEntireStructureFromStream)
+ }
+ // Copy in header
+ data[0] = hdr[0]; data[1] = hdr[1];
+
+ // assign to this string, storing the header and the extra data
+ mEncryptedName.assign(data, dsize);
+ }
+
+ // Check it
+ CheckValid();
+
+ // Alert derived classes
+ EncodedFilenameChanged();
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::WriteToStream(IOStream &)
+// Purpose: Writes the filename to a stream
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilename::WriteToStream(IOStream &rStream) const
+{
+ CheckValid();
+
+ rStream.Write(mEncryptedName.c_str(), mEncryptedName.size());
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::EncodedFilenameChanged()
+// Purpose: The encoded filename stored has changed
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilename::EncodedFilenameChanged()
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::IsEncrypted()
+// Purpose: Returns true if the filename is stored using an encrypting encoding
+// Created: 1/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreFilename::IsEncrypted() const
+{
+ return BACKUPSTOREFILENAME_GET_ENCODING(this->mEncryptedName) !=
+ Encoding_Clear;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilename::SetAsClearFilename(const char *)
+// Purpose: Sets this object to be a valid filename, but with a
+// filename in the clear. Used on the server to create
+//				filenames when there's no way of encrypting them.
+// Created: 22/4/04
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilename::SetAsClearFilename(const char *Clear)
+{
+ // Make std::string from the clear name
+ std::string toEncode(Clear);
+
+ // Make an encoded string
+ char hdr[2];
+ BACKUPSTOREFILENAME_MAKE_HDR(hdr, toEncode.size()+2, Encoding_Clear);
+ std::string encoded(hdr, 2);
+ encoded += toEncode;
+ ASSERT(encoded.size() == toEncode.size() + 2);
+
+ // Store the encoded string
+ mEncryptedName.assign(encoded);
+
+ // Stuff which must be done
+ EncodedFilenameChanged();
+ CheckValid(false);
+}
+
+
+
diff --git a/lib/backupstore/BackupStoreFilename.h b/lib/backupstore/BackupStoreFilename.h
new file mode 100644
index 00000000..80db9516
--- /dev/null
+++ b/lib/backupstore/BackupStoreFilename.h
@@ -0,0 +1,107 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFilename.h
+// Purpose: Filename for the backup store
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREFILENAME__H
+#define BACKUPSTOREFILENAME__H
+
+#include <string>
+
+class Protocol;
+class IOStream;
+
+// #define BACKUPSTOREFILEAME_MALLOC_ALLOC_BASE_TYPE
+// don't define this -- the problem of memory usage still appears without this.
+// It's just that this class really showed up the problem. Instead, malloc allocation
+// is globally defined in BoxPlatform.h, for troublesome libraries.
+
+#ifdef BACKUPSTOREFILEAME_MALLOC_ALLOC_BASE_TYPE
+ // Use a malloc_allocated string, because the STL default allocators really screw up with
+ // memory allocation, particularly with this class.
+ // Makes a few things a bit messy and inefficient with conversions.
+ // Given up using this, and use global malloc allocation instead, but thought it
+ // worth leaving this code in just in case it's useful for the future.
+ typedef std::basic_string<char, std::string_char_traits<char>, std::malloc_alloc> BackupStoreFilename_base;
+ // If this is changed, change GetClearFilename() back to returning a reference.
+#else
+ typedef std::string BackupStoreFilename_base;
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupStoreFilename
+// Purpose: Filename for the backup store
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+class BackupStoreFilename /* : public BackupStoreFilename_base */
+{
+private:
+ std::string mEncryptedName;
+
+public:
+ BackupStoreFilename();
+ BackupStoreFilename(const BackupStoreFilename &rToCopy);
+ virtual ~BackupStoreFilename();
+
+ bool CheckValid(bool ExceptionIfInvalid = true) const;
+
+ void ReadFromProtocol(Protocol &rProtocol);
+ void WriteToProtocol(Protocol &rProtocol) const;
+
+ void ReadFromStream(IOStream &rStream, int Timeout);
+ void WriteToStream(IOStream &rStream) const;
+
+ void SetAsClearFilename(const char *Clear);
+
+ // Check that it's encrypted
+ bool IsEncrypted() const;
+
+ // These enumerated types belong in the base class so
+ // the CheckValid() function can make sure that the encoding
+ // is a valid encoding
+ enum
+ {
+ Encoding_Min = 1,
+ Encoding_Clear = 1,
+ Encoding_Blowfish = 2,
+ Encoding_Max = 2
+ };
+
+ const std::string& GetEncodedFilename() const
+ {
+ return mEncryptedName;
+ }
+
+ bool operator==(const BackupStoreFilename& rOther) const
+ {
+ return mEncryptedName == rOther.mEncryptedName;
+ }
+
+ bool operator!=(const BackupStoreFilename& rOther) const
+ {
+ return mEncryptedName != rOther.mEncryptedName;
+ }
+
+protected:
+ virtual void EncodedFilenameChanged();
+ void SetEncodedFilename(const std::string &rEncoded)
+ {
+ mEncryptedName = rEncoded;
+ }
+};
+
+// On the wire utilities for class and derived class
+#define BACKUPSTOREFILENAME_GET_SIZE(hdr) (( ((uint8_t)((hdr)[0])) | ( ((uint8_t)((hdr)[1])) << 8)) >> 2)
+#define BACKUPSTOREFILENAME_GET_ENCODING(hdr) (((hdr)[0]) & 0x3)
+
+#define BACKUPSTOREFILENAME_MAKE_HDR(hdr, size, encoding) {uint16_t h = (((uint16_t)size) << 2) | (encoding); ((hdr)[0]) = h & 0xff; ((hdr)[1]) = h >> 8;}
+
+#endif // BACKUPSTOREFILENAME__H
+
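// A quick round-trip through the two-byte header macros above, to make the
// layout concrete: the low two bits of the little-endian 16-bit value hold the
// encoding, and the remaining fourteen bits hold the total size of the encoded
// filename, header included. The values are arbitrary.
void FilenameHeaderExample()
{
	char hdr[2];

	// A clear-encoded filename whose encoded form is 42 bytes in total
	BACKUPSTOREFILENAME_MAKE_HDR(hdr, 42, BackupStoreFilename::Encoding_Clear);

	ASSERT(BACKUPSTOREFILENAME_GET_SIZE(hdr) == 42);
	ASSERT(BACKUPSTOREFILENAME_GET_ENCODING(hdr) ==
		BackupStoreFilename::Encoding_Clear);
}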
diff --git a/lib/backupstore/BackupStoreFilenameClear.cpp b/lib/backupstore/BackupStoreFilenameClear.cpp
new file mode 100644
index 00000000..ad5666cf
--- /dev/null
+++ b/lib/backupstore/BackupStoreFilenameClear.cpp
@@ -0,0 +1,347 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFilenameClear.cpp
+// Purpose: BackupStoreFilenames in the clear
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+#include "BackupStoreFilenameClear.h"
+#include "BackupStoreException.h"
+#include "CipherContext.h"
+#include "CipherBlowfish.h"
+#include "Guards.h"
+#include "Logging.h"
+
+#include "MemLeakFindOn.h"
+
+// Hide private variables from the rest of the world
+namespace
+{
+ int sEncodeMethod = BackupStoreFilename::Encoding_Clear;
+ CipherContext sBlowfishEncrypt;
+ CipherContext sBlowfishDecrypt;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::BackupStoreFilenameClear()
+// Purpose: Default constructor, creates an invalid filename
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilenameClear::BackupStoreFilenameClear()
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::BackupStoreFilenameClear(const std::string &)
+// Purpose: Creates a filename, encoding from the given string
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilenameClear::BackupStoreFilenameClear(const std::string &rToEncode)
+{
+ SetClearFilename(rToEncode);
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::BackupStoreFilenameClear(const BackupStoreFilenameClear &)
+// Purpose: Copy constructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilenameClear::BackupStoreFilenameClear(const BackupStoreFilenameClear &rToCopy)
+: BackupStoreFilename(rToCopy),
+ mClearFilename(rToCopy.mClearFilename)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::BackupStoreFilenameClear(const BackupStoreFilename &rToCopy)
+// Purpose: Copy from base class
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilenameClear::BackupStoreFilenameClear(const BackupStoreFilename &rToCopy)
+: BackupStoreFilename(rToCopy)
+{
+ // Will get a clear filename when it's required
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::~BackupStoreFilenameClear()
+// Purpose: Destructor
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+BackupStoreFilenameClear::~BackupStoreFilenameClear()
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::GetClearFilename()
+// Purpose: Get the unencoded filename
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+#ifdef BACKUPSTOREFILEAME_MALLOC_ALLOC_BASE_TYPE
+const std::string BackupStoreFilenameClear::GetClearFilename() const
+{
+ MakeClearAvailable();
+ // When modifying, remember to change back to reference return if at all possible
+ // -- returns an object rather than a reference to allow easy use with other code.
+ return std::string(mClearFilename.c_str(), mClearFilename.size());
+}
+#else
+const std::string &BackupStoreFilenameClear::GetClearFilename() const
+{
+ MakeClearAvailable();
+ return mClearFilename;
+}
+const std::string &BackupStoreFilenameClear::GetClearFilenameIfPossible(const std::string& alternative) const
+{
+ if(mClearFilename.empty() && !(sBlowfishDecrypt.IsInitialised()))
+ {
+ // encrypted and cannot decrypt
+ return alternative;
+ }
+ else
+ {
+ return GetClearFilename();
+ }
+}
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::SetClearFilename(const std::string &)
+// Purpose: Encode and make available the clear filename
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilenameClear::SetClearFilename(const std::string &rToEncode)
+{
+ // Only allow Blowfish encodings
+ if(sEncodeMethod != Encoding_Blowfish)
+ {
+ THROW_EXCEPTION(BackupStoreException, FilenameEncryptionNotSetup)
+ }
+
+ // Make an encoded string with blowfish encryption
+ EncryptClear(rToEncode, sBlowfishEncrypt, Encoding_Blowfish);
+
+ // Store the clear filename
+ mClearFilename.assign(rToEncode.c_str(), rToEncode.size());
+
+ // Make sure we did the right thing
+ if(!CheckValid(false))
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::MakeClearAvailable()
+// Purpose: Private. Make sure the clear filename is available
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilenameClear::MakeClearAvailable() const
+{
+ if(!mClearFilename.empty())
+ return; // nothing to do
+
+ // Check valid
+ CheckValid();
+
+ // Decode the header
+ int size = BACKUPSTOREFILENAME_GET_SIZE(GetEncodedFilename());
+ int encoding = BACKUPSTOREFILENAME_GET_ENCODING(GetEncodedFilename());
+
+ // Decode based on encoding given in the header
+ switch(encoding)
+ {
+ case Encoding_Clear:
+ BOX_WARNING("**** BackupStoreFilename encoded with "
+ "Clear encoding ****");
+ mClearFilename.assign(GetEncodedFilename().c_str() + 2,
+ size - 2);
+ break;
+
+ case Encoding_Blowfish:
+ DecryptEncoded(sBlowfishDecrypt);
+ break;
+
+ default:
+ THROW_EXCEPTION(BackupStoreException, UnknownFilenameEncoding)
+ break;
+ }
+}
+
+
+// Buffer for encoding and decoding -- do this all in one single buffer to
+// avoid lots of string allocation, which stuffs up memory usage.
+// These static memory vars are, of course, not thread safe, but we don't use threads.
+static int sEncDecBufferSize = 0;
+static MemoryBlockGuard<uint8_t *> *spEncDecBuffer = 0;
+
+static void EnsureEncDecBufferSize(int BufSize)
+{
+ if(spEncDecBuffer == 0)
+ {
+#ifndef WIN32
+ BOX_TRACE("Allocating filename encoding/decoding buffer "
+ "with size " << BufSize);
+#endif
+ spEncDecBuffer = new MemoryBlockGuard<uint8_t *>(BufSize);
+ MEMLEAKFINDER_NOT_A_LEAK(spEncDecBuffer);
+ MEMLEAKFINDER_NOT_A_LEAK(*spEncDecBuffer);
+ sEncDecBufferSize = BufSize;
+ }
+ else
+ {
+ if(sEncDecBufferSize < BufSize)
+ {
+ BOX_TRACE("Reallocating filename encoding/decoding "
+ "buffer from " << sEncDecBufferSize <<
+ " to " << BufSize);
+ spEncDecBuffer->Resize(BufSize);
+ sEncDecBufferSize = BufSize;
+ MEMLEAKFINDER_NOT_A_LEAK(*spEncDecBuffer);
+ }
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::EncryptClear(const std::string &, CipherContext &, int)
+// Purpose: Private. Assigns the encoded filename string, encrypting.
+// Created: 1/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilenameClear::EncryptClear(const std::string &rToEncode, CipherContext &rCipherContext, int StoreAsEncoding)
+{
+ // Work out max size
+ int maxOutSize = rCipherContext.MaxOutSizeForInBufferSize(rToEncode.size()) + 4;
+
+ // Make sure encode/decode buffer has enough space
+ EnsureEncDecBufferSize(maxOutSize);
+
+ // Pointer to buffer
+ uint8_t *buffer = *spEncDecBuffer;
+
+ // Encode -- do entire block in one go
+ int encSize = rCipherContext.TransformBlock(buffer + 2, sEncDecBufferSize - 2, rToEncode.c_str(), rToEncode.size());
+ // and add in header size
+ encSize += 2;
+
+ // Adjust header
+ BACKUPSTOREFILENAME_MAKE_HDR(buffer, encSize, StoreAsEncoding);
+
+ // Store the encoded string
+ SetEncodedFilename(std::string((char*)buffer, encSize));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::DecryptEncoded(CipherContext &)
+// Purpose: Decrypt the encoded filename using the cipher context
+// Created: 1/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilenameClear::DecryptEncoded(CipherContext &rCipherContext) const
+{
+ const std::string& rEncoded = GetEncodedFilename();
+
+ // Work out max size
+ int maxOutSize = rCipherContext.MaxOutSizeForInBufferSize(rEncoded.size()) + 4;
+
+ // Make sure encode/decode buffer has enough space
+ EnsureEncDecBufferSize(maxOutSize);
+
+ // Pointer to buffer
+ uint8_t *buffer = *spEncDecBuffer;
+
+ // Decrypt
+ const char *str = rEncoded.c_str() + 2;
+ int sizeOut = rCipherContext.TransformBlock(buffer, sEncDecBufferSize, str, rEncoded.size() - 2);
+
+ // Assign to this
+ mClearFilename.assign((char*)buffer, sizeOut);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::EncodedFilenameChanged()
+// Purpose: The encoded filename stored has changed
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilenameClear::EncodedFilenameChanged()
+{
+ BackupStoreFilename::EncodedFilenameChanged();
+
+ // Delete stored filename in clear
+ mClearFilename.erase();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::SetBlowfishKey(const void *, int)
+// Purpose: Set the key used for Blowfish encryption of filenames
+// Created: 1/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilenameClear::SetBlowfishKey(const void *pKey, int KeyLength, const void *pIV, int IVLength)
+{
+	// A single fixed initialisation vector is used for all filenames. Can't use a different vector for each filename as
+	// that would stop comparisons on the server working.
+ sBlowfishEncrypt.Reset();
+ sBlowfishEncrypt.Init(CipherContext::Encrypt, CipherBlowfish(CipherDescription::Mode_CBC, pKey, KeyLength));
+ ASSERT(sBlowfishEncrypt.GetIVLength() == IVLength);
+ sBlowfishEncrypt.SetIV(pIV);
+ sBlowfishDecrypt.Reset();
+ sBlowfishDecrypt.Init(CipherContext::Decrypt, CipherBlowfish(CipherDescription::Mode_CBC, pKey, KeyLength));
+ ASSERT(sBlowfishDecrypt.GetIVLength() == IVLength);
+ sBlowfishDecrypt.SetIV(pIV);
+}
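// A small sketch of the property the comment above is protecting: with one
// fixed key and IV for every filename, equal clear-text names always encrypt
// to byte-for-byte equal encoded names, which is what lets the server compare
// and look up filenames without being able to decrypt them. This assumes
// SetBlowfishKey() and SetEncodingMethod(Encoding_Blowfish) have already been
// called; the function and the example name are hypothetical.
void SameNameEncodesIdentically()
{
	BackupStoreFilenameClear a("example.txt");
	BackupStoreFilenameClear b("example.txt");

	// Both objects hold identical encoded (encrypted) representations
	ASSERT(a.GetEncodedFilename() == b.GetEncodedFilename());
	ASSERT(a == b);		// operator== compares the encoded names
}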
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreFilenameClear::SetEncodingMethod(int)
+// Purpose: Set the encoding method used for filenames
+// Created: 1/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreFilenameClear::SetEncodingMethod(int Method)
+{
+ sEncodeMethod = Method;
+}
+
+
+
diff --git a/lib/backupstore/BackupStoreFilenameClear.h b/lib/backupstore/BackupStoreFilenameClear.h
new file mode 100644
index 00000000..595d1158
--- /dev/null
+++ b/lib/backupstore/BackupStoreFilenameClear.h
@@ -0,0 +1,61 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreFilenameClear.h
+// Purpose: BackupStoreFilenames in the clear
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREFILENAMECLEAR__H
+#define BACKUPSTOREFILENAMECLEAR__H
+
+#include "BackupStoreFilename.h"
+
+class CipherContext;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupStoreFilenameClear
+//		Purpose: BackupStoreFilenames, handling conversion to and from the in-the-clear version
+// Created: 2003/08/26
+//
+// --------------------------------------------------------------------------
+class BackupStoreFilenameClear : public BackupStoreFilename
+{
+public:
+ BackupStoreFilenameClear();
+ BackupStoreFilenameClear(const std::string &rToEncode);
+ BackupStoreFilenameClear(const BackupStoreFilenameClear &rToCopy);
+ BackupStoreFilenameClear(const BackupStoreFilename &rToCopy);
+ virtual ~BackupStoreFilenameClear();
+
+ // Because we need to use a different allocator for this class to avoid
+ // nasty things happening, can't return this as a reference. Which is a
+ // pity. But probably not too bad.
+#ifdef BACKUPSTOREFILEAME_MALLOC_ALLOC_BASE_TYPE
+ const std::string GetClearFilename() const;
+#else
+ const std::string &GetClearFilename() const;
+ const std::string &GetClearFilenameIfPossible(const std::string& alternative) const;
+#endif
+ void SetClearFilename(const std::string &rToEncode);
+
+ // Setup for encryption of filenames
+ static void SetBlowfishKey(const void *pKey, int KeyLength, const void *pIV, int IVLength);
+ static void SetEncodingMethod(int Method);
+
+protected:
+ void MakeClearAvailable() const;
+ virtual void EncodedFilenameChanged();
+ void EncryptClear(const std::string &rToEncode, CipherContext &rCipherContext, int StoreAsEncoding);
+ void DecryptEncoded(CipherContext &rCipherContext) const;
+
+private:
+ mutable BackupStoreFilename_base mClearFilename;
+};
+
+#endif // BACKUPSTOREFILENAMECLEAR__H
+
+
diff --git a/lib/backupstore/BackupStoreInfo.cpp b/lib/backupstore/BackupStoreInfo.cpp
index 1d55fdf0..c58ae99a 100644
--- a/lib/backupstore/BackupStoreInfo.cpp
+++ b/lib/backupstore/BackupStoreInfo.cpp
@@ -11,6 +11,10 @@
#include <algorithm>
+<<<<<<< HEAD
+=======
+#include "Archive.h"
+>>>>>>> 0.12
#include "BackupStoreInfo.h"
#include "BackupStoreException.h"
#include "RaidFileWrite.h"
@@ -18,6 +22,7 @@
#include "MemLeakFindOn.h"
+<<<<<<< HEAD
// set packing to one byte
#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
#include "BeginStructPackForWire.h"
@@ -55,14 +60,19 @@ typedef struct
END_STRUCTURE_PACKING_FOR_WIRE
#endif
+=======
+>>>>>>> 0.12
#ifdef BOX_RELEASE_BUILD
#define NUM_DELETED_DIRS_BLOCK 256
#else
#define NUM_DELETED_DIRS_BLOCK 2
#endif
+<<<<<<< HEAD
#define INFO_FILENAME "info"
+=======
+>>>>>>> 0.12
// --------------------------------------------------------------------------
//
// Function
@@ -79,8 +89,20 @@ BackupStoreInfo::BackupStoreInfo()
mClientStoreMarker(0),
mLastObjectIDUsed(-1),
mBlocksUsed(0),
+<<<<<<< HEAD
mBlocksInOldFiles(0),
mBlocksInDeletedFiles(0)
+=======
+ mBlocksInCurrentFiles(0),
+ mBlocksInOldFiles(0),
+ mBlocksInDeletedFiles(0),
+ mBlocksInDirectories(0),
+ mNumFiles(0),
+ mNumOldFiles(0),
+ mNumDeletedFiles(0),
+ mNumDirectories(0),
+ mAccountEnabled(true)
+>>>>>>> 0.12
{
}
@@ -106,6 +128,7 @@ BackupStoreInfo::~BackupStoreInfo()
// --------------------------------------------------------------------------
void BackupStoreInfo::CreateNew(int32_t AccountID, const std::string &rRootDir, int DiscSet, int64_t BlockSoftLimit, int64_t BlockHardLimit)
{
+<<<<<<< HEAD
// Initial header (is entire file)
info_StreamFormat hdr = {
htonl(INFO_MAGIC_VALUE), // mMagicValue
@@ -139,11 +162,29 @@ void BackupStoreInfo::CreateNew(int32_t AccountID, const std::string &rRootDir,
rf.Commit(true);
// Done.
+=======
+ BackupStoreInfo info;
+ info.mAccountID = AccountID;
+ info.mDiscSet = DiscSet;
+ info.mReadOnly = false;
+ info.mLastObjectIDUsed = 1;
+ info.mBlocksSoftLimit = BlockSoftLimit;
+ info.mBlocksHardLimit = BlockHardLimit;
+
+ // Generate the filename
+ ASSERT(rRootDir[rRootDir.size() - 1] == '/' ||
+ rRootDir[rRootDir.size() - 1] == DIRECTORY_SEPARATOR_ASCHAR);
+ info.mFilename = rRootDir + INFO_FILENAME;
+ info.mExtraData.SetForReading(); // extra data is empty in this case
+
+ info.Save(false);
+>>>>>>> 0.12
}
// --------------------------------------------------------------------------
//
// Function
+<<<<<<< HEAD
// Name: BackupStoreInfo::Load(int32_t, const std::string &, int, bool)
// Purpose: Loads the info from disc, given the root information. Can be marked as read only.
// Created: 2003/08/28
@@ -170,6 +211,51 @@ std::auto_ptr<BackupStoreInfo> BackupStoreInfo::Load(int32_t AccountID, const st
THROW_EXCEPTION(BackupStoreException, BadStoreInfoOnLoad)
}
+=======
+// Name: BackupStoreInfo::Load(int32_t, const std::string &,
+// int, bool)
+// Purpose: Loads the info from disc, given the root
+// information. Can be marked as read only.
+// Created: 2003/08/28
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<BackupStoreInfo> BackupStoreInfo::Load(int32_t AccountID,
+ const std::string &rRootDir, int DiscSet, bool ReadOnly,
+ int64_t *pRevisionID)
+{
+ // Generate the filename
+ std::string fn(rRootDir + INFO_FILENAME);
+
+ // Open the file for reading (passing on optional request for revision ID)
+ std::auto_ptr<RaidFileRead> rf(RaidFileRead::Open(DiscSet, fn, pRevisionID));
+
+ // Read in format and version
+ int32_t magic;
+ if(!rf->ReadFullBuffer(&magic, sizeof(magic), 0))
+ {
+ THROW_FILE_ERROR("Failed to read store info file: "
+ "short read of magic number", fn,
+ BackupStoreException, CouldNotLoadStoreInfo);
+ }
+
+ bool v1 = false, v2 = false;
+
+ if(ntohl(magic) == INFO_MAGIC_VALUE_1)
+ {
+ v1 = true;
+ }
+ else if(ntohl(magic) == INFO_MAGIC_VALUE_2)
+ {
+ v2 = true;
+ }
+ else
+ {
+ THROW_FILE_ERROR("Failed to read store info file: "
+ "unknown magic " << BOX_FORMAT_HEX32(ntohl(magic)),
+ fn, BackupStoreException, BadStoreInfoOnLoad);
+ }
+
+>>>>>>> 0.12
// Make new object
std::auto_ptr<BackupStoreInfo> info(new BackupStoreInfo);
@@ -178,6 +264,7 @@ std::auto_ptr<BackupStoreInfo> BackupStoreInfo::Load(int32_t AccountID, const st
info->mDiscSet = DiscSet;
info->mFilename = fn;
info->mReadOnly = ReadOnly;
+<<<<<<< HEAD
// Insert info from file
info->mClientStoreMarker = box_ntoh64(hdr.mClientStoreMarker);
@@ -193,6 +280,75 @@ std::auto_ptr<BackupStoreInfo> BackupStoreInfo::Load(int32_t AccountID, const st
int64_t numDelObj = box_ntoh64(hdr.mNumberDeletedDirectories);
// Then load them in
+=======
+ int64_t numDelObj = 0;
+
+ if (v1)
+ {
+ // Read in a header
+ info_StreamFormat_1 hdr;
+ rf->Seek(0, IOStream::SeekType_Absolute);
+
+ if(!rf->ReadFullBuffer(&hdr, sizeof(hdr),
+ 0 /* not interested in bytes read if this fails */))
+ {
+ THROW_FILE_ERROR("Failed to read store info header",
+ fn, BackupStoreException, CouldNotLoadStoreInfo);
+ }
+
+ // Check it
+ if((int32_t)ntohl(hdr.mAccountID) != AccountID)
+ {
+ THROW_FILE_ERROR("Found wrong account ID in store info",
+ fn, BackupStoreException, BadStoreInfoOnLoad);
+ }
+
+ // Insert info from file
+ info->mClientStoreMarker = box_ntoh64(hdr.mClientStoreMarker);
+ info->mLastObjectIDUsed = box_ntoh64(hdr.mLastObjectIDUsed);
+ info->mBlocksUsed = box_ntoh64(hdr.mBlocksUsed);
+ info->mBlocksInOldFiles = box_ntoh64(hdr.mBlocksInOldFiles);
+ info->mBlocksInDeletedFiles = box_ntoh64(hdr.mBlocksInDeletedFiles);
+ info->mBlocksInDirectories = box_ntoh64(hdr.mBlocksInDirectories);
+ info->mBlocksSoftLimit = box_ntoh64(hdr.mBlocksSoftLimit);
+ info->mBlocksHardLimit = box_ntoh64(hdr.mBlocksHardLimit);
+
+ // Load up array of deleted objects
+ numDelObj = box_ntoh64(hdr.mNumberDeletedDirectories);
+ }
+ else if(v2)
+ {
+ Archive archive(*rf, IOStream::TimeOutInfinite);
+
+ // Check it
+ int32_t FileAccountID;
+ archive.Read(FileAccountID);
+ if (FileAccountID != AccountID)
+ {
+ THROW_FILE_ERROR("Found wrong account ID in store "
+ "info: " << BOX_FORMAT_HEX32(FileAccountID),
+ fn, BackupStoreException, BadStoreInfoOnLoad);
+ }
+
+ archive.Read(info->mAccountName);
+ archive.Read(info->mClientStoreMarker);
+ archive.Read(info->mLastObjectIDUsed);
+ archive.Read(info->mBlocksUsed);
+ archive.Read(info->mBlocksInCurrentFiles);
+ archive.Read(info->mBlocksInOldFiles);
+ archive.Read(info->mBlocksInDeletedFiles);
+ archive.Read(info->mBlocksInDirectories);
+ archive.Read(info->mBlocksSoftLimit);
+ archive.Read(info->mBlocksHardLimit);
+ archive.Read(info->mNumFiles);
+ archive.Read(info->mNumOldFiles);
+ archive.Read(info->mNumDeletedFiles);
+ archive.Read(info->mNumDirectories);
+ archive.Read(numDelObj);
+ }
+
+ // Then load the list of deleted directories
+>>>>>>> 0.12
if(numDelObj > 0)
{
int64_t objs[NUM_DELETED_DIRS_BLOCK];
@@ -224,6 +380,29 @@ std::auto_ptr<BackupStoreInfo> BackupStoreInfo::Load(int32_t AccountID, const st
{
THROW_EXCEPTION(BackupStoreException, BadStoreInfoOnLoad)
}
+<<<<<<< HEAD
+=======
+
+ if(v2)
+ {
+ Archive archive(*rf, IOStream::TimeOutInfinite);
+ archive.ReadIfPresent(info->mAccountEnabled, true);
+ }
+ else
+ {
+ info->mAccountEnabled = true;
+ }
+
+ // If there's any data left in the info file, from future additions to
+ // the file format, then we need to load it so that it won't be lost when
+ // we resave the file.
+ IOStream::pos_type bytesLeft = rf->BytesLeftToRead();
+ if (bytesLeft > 0)
+ {
+ rf->CopyStreamTo(info->mExtraData);
+ }
+ info->mExtraData.SetForReading();
+>>>>>>> 0.12
// return it to caller
return info;
@@ -238,18 +417,33 @@ std::auto_ptr<BackupStoreInfo> BackupStoreInfo::Load(int32_t AccountID, const st
// Created: 23/4/04
//
// --------------------------------------------------------------------------
+<<<<<<< HEAD
std::auto_ptr<BackupStoreInfo> BackupStoreInfo::CreateForRegeneration(int32_t AccountID, const std::string &rRootDir,
int DiscSet, int64_t LastObjectID, int64_t BlocksUsed, int64_t BlocksInOldFiles,
int64_t BlocksInDeletedFiles, int64_t BlocksInDirectories, int64_t BlockSoftLimit, int64_t BlockHardLimit)
{
// Generate the filename
std::string fn(rRootDir + DIRECTORY_SEPARATOR INFO_FILENAME);
+=======
+std::auto_ptr<BackupStoreInfo> BackupStoreInfo::CreateForRegeneration(
+ int32_t AccountID, const std::string& rAccountName,
+ const std::string &rRootDir, int DiscSet,
+ int64_t LastObjectID, int64_t BlocksUsed,
+ int64_t BlocksInCurrentFiles, int64_t BlocksInOldFiles,
+ int64_t BlocksInDeletedFiles, int64_t BlocksInDirectories,
+ int64_t BlockSoftLimit, int64_t BlockHardLimit,
+ bool AccountEnabled, IOStream& ExtraData)
+{
+ // Generate the filename
+ std::string fn(rRootDir + INFO_FILENAME);
+>>>>>>> 0.12
// Make new object
std::auto_ptr<BackupStoreInfo> info(new BackupStoreInfo);
// Put in basic info
info->mAccountID = AccountID;
+<<<<<<< HEAD
info->mDiscSet = DiscSet;
info->mFilename = fn;
info->mReadOnly = false;
@@ -258,12 +452,32 @@ std::auto_ptr<BackupStoreInfo> BackupStoreInfo::CreateForRegeneration(int32_t Ac
info->mClientStoreMarker = 0;
info->mLastObjectIDUsed = LastObjectID;
info->mBlocksUsed = BlocksUsed;
+=======
+ info->mAccountName = rAccountName;
+ info->mDiscSet = DiscSet;
+ info->mFilename = fn;
+ info->mReadOnly = false;
+
+ // Insert info starting info
+ info->mClientStoreMarker = 0;
+ info->mLastObjectIDUsed = LastObjectID;
+ info->mBlocksUsed = BlocksUsed;
+ info->mBlocksInCurrentFiles = BlocksInCurrentFiles;
+>>>>>>> 0.12
info->mBlocksInOldFiles = BlocksInOldFiles;
info->mBlocksInDeletedFiles = BlocksInDeletedFiles;
info->mBlocksInDirectories = BlocksInDirectories;
info->mBlocksSoftLimit = BlockSoftLimit;
info->mBlocksHardLimit = BlockHardLimit;
+<<<<<<< HEAD
+
+=======
+ info->mAccountEnabled = AccountEnabled;
+ ExtraData.CopyStreamTo(info->mExtraData);
+ info->mExtraData.SetForReading();
+
+>>>>>>> 0.12
// return it to caller
return info;
}
@@ -272,12 +486,20 @@ std::auto_ptr<BackupStoreInfo> BackupStoreInfo::CreateForRegeneration(int32_t Ac
// --------------------------------------------------------------------------
//
// Function
+<<<<<<< HEAD
// Name: BackupStoreInfo::Save()
+=======
+// Name: BackupStoreInfo::Save(bool allowOverwrite)
+>>>>>>> 0.12
// Purpose: Save modified info back to disc
// Created: 2003/08/28
//
// --------------------------------------------------------------------------
+<<<<<<< HEAD
void BackupStoreInfo::Save()
+=======
+void BackupStoreInfo::Save(bool allowOverwrite)
+>>>>>>> 0.12
{
// Make sure we're initialised (although should never come to this)
if(mFilename.empty() || mAccountID == -1 || mDiscSet == -1)
@@ -293,6 +515,7 @@ void BackupStoreInfo::Save()
// Then... open a write file
RaidFileWrite rf(mDiscSet, mFilename);
+<<<<<<< HEAD
rf.Open(true); // allow overwriting
// Make header
@@ -314,6 +537,34 @@ void BackupStoreInfo::Save()
// Write header
rf.Write(&hdr, sizeof(hdr));
+=======
+ rf.Open(allowOverwrite);
+
+ // Make header
+ int32_t magic = htonl(INFO_MAGIC_VALUE_2);
+ rf.Write(&magic, sizeof(magic));
+ Archive archive(rf, IOStream::TimeOutInfinite);
+
+ archive.Write(mAccountID);
+ archive.Write(mAccountName);
+ archive.Write(mClientStoreMarker);
+ archive.Write(mLastObjectIDUsed);
+ archive.Write(mBlocksUsed);
+ archive.Write(mBlocksInCurrentFiles);
+ archive.Write(mBlocksInOldFiles);
+ archive.Write(mBlocksInDeletedFiles);
+ archive.Write(mBlocksInDirectories);
+ archive.Write(mBlocksSoftLimit);
+ archive.Write(mBlocksHardLimit);
+ archive.Write(mNumFiles);
+ archive.Write(mNumOldFiles);
+ archive.Write(mNumDeletedFiles);
+ archive.Write(mNumDirectories);
+
+ int64_t numDelObj = mDeletedDirectories.size();
+ archive.Write(numDelObj);
+
+>>>>>>> 0.12
// Write the deleted object list
if(mDeletedDirectories.size() > 0)
{
@@ -341,6 +592,15 @@ void BackupStoreInfo::Save()
tosave -= b;
}
}
+<<<<<<< HEAD
+=======
+
+ archive.Write(mAccountEnabled);
+
+ mExtraData.Seek(0, IOStream::SeekType_Absolute);
+ mExtraData.CopyStreamTo(rf);
+ mExtraData.Seek(0, IOStream::SeekType_Absolute);
+>>>>>>> 0.12
// Commit it to disc, converting it to RAID now
rf.Commit(true);
@@ -349,7 +609,59 @@ void BackupStoreInfo::Save()
mIsModified = false;
}
+<<<<<<< HEAD
+=======
+int BackupStoreInfo::ReportChangesTo(BackupStoreInfo& rOldInfo)
+{
+ int numChanges = 0;
+
+ #define COMPARE(attribute) \
+ if (rOldInfo.Get ## attribute () != Get ## attribute ()) \
+ { \
+ BOX_ERROR(#attribute " changed from " << \
+ rOldInfo.Get ## attribute () << " to " << \
+ Get ## attribute ()); \
+ numChanges++; \
+ }
+
+ COMPARE(AccountID);
+ COMPARE(AccountName);
+ COMPARE(LastObjectIDUsed);
+ COMPARE(BlocksUsed);
+ COMPARE(BlocksInCurrentFiles);
+ COMPARE(BlocksInOldFiles);
+ COMPARE(BlocksInDeletedFiles);
+ COMPARE(BlocksInDirectories);
+ COMPARE(BlocksSoftLimit);
+ COMPARE(BlocksHardLimit);
+ COMPARE(NumFiles);
+ COMPARE(NumOldFiles);
+ COMPARE(NumDeletedFiles);
+ COMPARE(NumDirectories);
+
+ #undef COMPARE
+
+ return numChanges;
+}
+
+#define APPLY_DELTA(field, delta) \
+ if(mReadOnly) \
+ { \
+ THROW_EXCEPTION(BackupStoreException, StoreInfoIsReadOnly) \
+ } \
+ \
+ if((field + delta) < 0) \
+ { \
+ THROW_EXCEPTION_MESSAGE(BackupStoreException, \
+ StoreInfoBlockDeltaMakesValueNegative, \
+ "Failed to reduce " << #field << " from " << \
+ field << " by " << delta); \
+ } \
+ \
+ field += delta; \
+ mIsModified = true;
+>>>>>>> 0.12
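
The APPLY_DELTA macro above centralises the guard that every counter change needs: refuse the change if the info is read-only, refuse it if the delta would drive the counter negative, otherwise apply it and mark the info as modified. A minimal standalone sketch of the same guard, with the macro replaced by a plain function and standard exceptions (illustrative only, not the project's API):

#include <cstdint>
#include <stdexcept>

// Sketch of the check-and-apply pattern that APPLY_DELTA expands to.
// The real code throws BackupStoreException; std::runtime_error is
// used here only to keep the example self-contained.
void ApplyDelta(int64_t& field, int64_t delta, bool readOnly, bool& isModified)
{
	if(readOnly)
	{
		throw std::runtime_error("store info is read-only");
	}
	if(field + delta < 0)
	{
		throw std::runtime_error("delta would make the counter negative");
	}
	field += delta;
	isModified = true;
}
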
// --------------------------------------------------------------------------
//
@@ -361,6 +673,7 @@ void BackupStoreInfo::Save()
// --------------------------------------------------------------------------
void BackupStoreInfo::ChangeBlocksUsed(int64_t Delta)
{
+<<<<<<< HEAD
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoIsReadOnly)
@@ -373,6 +686,23 @@ void BackupStoreInfo::ChangeBlocksUsed(int64_t Delta)
mBlocksUsed += Delta;
mIsModified = true;
+=======
+ APPLY_DELTA(mBlocksUsed, Delta);
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreInfo::ChangeBlocksInCurrentFiles(int32_t)
+// Purpose: Change number of blocks in current files, by a delta
+// amount
+// Created: 2010/08/26
+//
+// --------------------------------------------------------------------------
+void BackupStoreInfo::ChangeBlocksInCurrentFiles(int64_t Delta)
+{
+ APPLY_DELTA(mBlocksInCurrentFiles, Delta);
+>>>>>>> 0.12
}
// --------------------------------------------------------------------------
@@ -385,6 +715,7 @@ void BackupStoreInfo::ChangeBlocksUsed(int64_t Delta)
// --------------------------------------------------------------------------
void BackupStoreInfo::ChangeBlocksInOldFiles(int64_t Delta)
{
+<<<<<<< HEAD
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoIsReadOnly)
@@ -397,6 +728,9 @@ void BackupStoreInfo::ChangeBlocksInOldFiles(int64_t Delta)
mBlocksInOldFiles += Delta;
mIsModified = true;
+=======
+ APPLY_DELTA(mBlocksInOldFiles, Delta);
+>>>>>>> 0.12
}
// --------------------------------------------------------------------------
@@ -409,6 +743,7 @@ void BackupStoreInfo::ChangeBlocksInOldFiles(int64_t Delta)
// --------------------------------------------------------------------------
void BackupStoreInfo::ChangeBlocksInDeletedFiles(int64_t Delta)
{
+<<<<<<< HEAD
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoIsReadOnly)
@@ -421,6 +756,9 @@ void BackupStoreInfo::ChangeBlocksInDeletedFiles(int64_t Delta)
mBlocksInDeletedFiles += Delta;
mIsModified = true;
+=======
+ APPLY_DELTA(mBlocksInDeletedFiles, Delta);
+>>>>>>> 0.12
}
// --------------------------------------------------------------------------
@@ -433,6 +771,7 @@ void BackupStoreInfo::ChangeBlocksInDeletedFiles(int64_t Delta)
// --------------------------------------------------------------------------
void BackupStoreInfo::ChangeBlocksInDirectories(int64_t Delta)
{
+<<<<<<< HEAD
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoIsReadOnly)
@@ -447,6 +786,30 @@ void BackupStoreInfo::ChangeBlocksInDirectories(int64_t Delta)
mIsModified = true;
}
+=======
+ APPLY_DELTA(mBlocksInDirectories, Delta);
+}
+
+void BackupStoreInfo::AdjustNumFiles(int64_t increase)
+{
+ APPLY_DELTA(mNumFiles, increase);
+}
+
+void BackupStoreInfo::AdjustNumOldFiles(int64_t increase)
+{
+ APPLY_DELTA(mNumOldFiles, increase);
+}
+
+void BackupStoreInfo::AdjustNumDeletedFiles(int64_t increase)
+{
+ APPLY_DELTA(mNumDeletedFiles, increase);
+}
+
+void BackupStoreInfo::AdjustNumDirectories(int64_t increase)
+{
+ APPLY_DELTA(mNumDirectories, increase);
+}
+>>>>>>> 0.12
// --------------------------------------------------------------------------
//
@@ -590,4 +953,26 @@ void BackupStoreInfo::SetClientStoreMarker(int64_t ClientStoreMarker)
}
+<<<<<<< HEAD
+=======
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreInfo::SetAccountName(const std::string&)
+// Purpose: Sets the account name
+// Created: 2008/08/22
+//
+// --------------------------------------------------------------------------
+void BackupStoreInfo::SetAccountName(const std::string& rName)
+{
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoIsReadOnly)
+ }
+
+ mAccountName = rName;
+
+ mIsModified = true;
+}
+>>>>>>> 0.12
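
For reference, the COMPARE macro in ReportChangesTo() above expands, for each attribute, into a getter comparison, a log line and a counter increment. Hand-expanded for a single attribute it reads roughly as follows (BOX_ERROR is the project's stream-style logging macro):

// Hand-expanded form of COMPARE(BlocksUsed) inside ReportChangesTo():
if(rOldInfo.GetBlocksUsed() != GetBlocksUsed())
{
	BOX_ERROR("BlocksUsed changed from " << rOldInfo.GetBlocksUsed() <<
		" to " << GetBlocksUsed());
	numChanges++;
}
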
diff --git a/lib/backupstore/BackupStoreInfo.h b/lib/backupstore/BackupStoreInfo.h
index a94ca9d6..db2a62ef 100644
--- a/lib/backupstore/BackupStoreInfo.h
+++ b/lib/backupstore/BackupStoreInfo.h
@@ -14,8 +14,56 @@
#include <string>
#include <vector>
+<<<<<<< HEAD
class BackupStoreCheck;
+=======
+#include "CollectInBufferStream.h"
+
+class BackupStoreCheck;
+
+// set packing to one byte
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "BeginStructPackForWire.h"
+#else
+BEGIN_STRUCTURE_PACKING_FOR_WIRE
+#endif
+
+// ******************
+// make sure the defaults in CreateNew are modified!
+// ******************
+// Old version, grandfathered, do not change!
+typedef struct
+{
+ int32_t mMagicValue; // also the version number
+ int32_t mAccountID;
+ int64_t mClientStoreMarker;
+ int64_t mLastObjectIDUsed;
+ int64_t mBlocksUsed;
+ int64_t mBlocksInOldFiles;
+ int64_t mBlocksInDeletedFiles;
+ int64_t mBlocksInDirectories;
+ int64_t mBlocksSoftLimit;
+ int64_t mBlocksHardLimit;
+ uint32_t mCurrentMarkNumber;
+ uint32_t mOptionsPresent; // bit mask of optional elements present
+ int64_t mNumberDeletedDirectories;
+ // Then loads of int64_t IDs for the deleted directories
+} info_StreamFormat_1;
+
+#define INFO_MAGIC_VALUE_1 0x34832476
+#define INFO_MAGIC_VALUE_2 0x494e4632 /* INF2 */
+
+// Use default packing
+#ifdef STRUCTURE_PACKING_FOR_WIRE_USE_HEADERS
+#include "EndStructPackForWire.h"
+#else
+END_STRUCTURE_PACKING_FOR_WIRE
+#endif
+
+#define INFO_FILENAME "info"
+
+>>>>>>> 0.12
// --------------------------------------------------------------------------
//
// Class
@@ -46,23 +94,47 @@ public:
bool IsModified() const {return mIsModified;}
 	// Save modified information back to store
+<<<<<<< HEAD
void Save();
+=======
+ void Save(bool allowOverwrite = true);
+>>>>>>> 0.12
// Data access functions
int32_t GetAccountID() const {return mAccountID;}
int64_t GetLastObjectIDUsed() const {return mLastObjectIDUsed;}
int64_t GetBlocksUsed() const {return mBlocksUsed;}
+<<<<<<< HEAD
+=======
+ int64_t GetBlocksInCurrentFiles() const {return mBlocksInCurrentFiles;}
+>>>>>>> 0.12
int64_t GetBlocksInOldFiles() const {return mBlocksInOldFiles;}
int64_t GetBlocksInDeletedFiles() const {return mBlocksInDeletedFiles;}
int64_t GetBlocksInDirectories() const {return mBlocksInDirectories;}
const std::vector<int64_t> &GetDeletedDirectories() const {return mDeletedDirectories;}
int64_t GetBlocksSoftLimit() const {return mBlocksSoftLimit;}
int64_t GetBlocksHardLimit() const {return mBlocksHardLimit;}
+<<<<<<< HEAD
bool IsReadOnly() const {return mReadOnly;}
int GetDiscSetNumber() const {return mDiscSet;}
// Data modification functions
void ChangeBlocksUsed(int64_t Delta);
+=======
+ int64_t GetNumFiles() const {return mNumFiles;}
+ int64_t GetNumOldFiles() const {return mNumOldFiles;}
+ int64_t GetNumDeletedFiles() const {return mNumDeletedFiles;}
+ int64_t GetNumDirectories() const {return mNumDirectories;}
+ bool IsAccountEnabled() const {return mAccountEnabled;}
+ bool IsReadOnly() const {return mReadOnly;}
+ int GetDiscSetNumber() const {return mDiscSet;}
+
+ int ReportChangesTo(BackupStoreInfo& rOldInfo);
+
+ // Data modification functions
+ void ChangeBlocksUsed(int64_t Delta);
+ void ChangeBlocksInCurrentFiles(int64_t Delta);
+>>>>>>> 0.12
void ChangeBlocksInOldFiles(int64_t Delta);
void ChangeBlocksInDeletedFiles(int64_t Delta);
void ChangeBlocksInDirectories(int64_t Delta);
@@ -70,11 +142,19 @@ public:
void AddDeletedDirectory(int64_t DirID);
void RemovedDeletedDirectory(int64_t DirID);
void ChangeLimits(int64_t BlockSoftLimit, int64_t BlockHardLimit);
+<<<<<<< HEAD
+=======
+ void AdjustNumFiles(int64_t increase);
+ void AdjustNumOldFiles(int64_t increase);
+ void AdjustNumDeletedFiles(int64_t increase);
+ void AdjustNumDirectories(int64_t increase);
+>>>>>>> 0.12
// Object IDs
int64_t AllocateObjectID();
// Client marker set and get
+<<<<<<< HEAD
int64_t GetClientStoreMarker() {return mClientStoreMarker;}
void SetClientStoreMarker(int64_t ClientStoreMarker);
@@ -86,6 +166,35 @@ private:
private:
// Location information
int32_t mAccountID;
+=======
+ int64_t GetClientStoreMarker() const {return mClientStoreMarker;}
+ void SetClientStoreMarker(int64_t ClientStoreMarker);
+
+ const std::string& GetAccountName() const { return mAccountName; }
+ void SetAccountName(const std::string& rName);
+ const CollectInBufferStream& GetExtraData() const { return mExtraData; }
+ void SetAccountEnabled(bool IsEnabled) {mAccountEnabled = IsEnabled; }
+
+ /**
+ * @return a new BackupStoreInfo with the requested properties.
+ * This is exposed to allow testing, do not use otherwise!
+ */
+ static std::auto_ptr<BackupStoreInfo> CreateForRegeneration(
+ int32_t AccountID, const std::string &rAccountName,
+ const std::string &rRootDir, int DiscSet,
+ int64_t LastObjectID, int64_t BlocksUsed,
+ int64_t BlocksInCurrentFiles, int64_t BlocksInOldFiles,
+ int64_t BlocksInDeletedFiles, int64_t BlocksInDirectories,
+ int64_t BlockSoftLimit, int64_t BlockHardLimit,
+ bool AccountEnabled, IOStream& ExtraData);
+
+private:
+ // Location information
+ // Be VERY careful about changing types of these values, as
+ // they now define the sizes of fields on disk (via Archive).
+ int32_t mAccountID;
+ std::string mAccountName;
+>>>>>>> 0.12
int mDiscSet;
std::string mFilename;
bool mReadOnly;
@@ -97,15 +206,31 @@ private:
// Account information
int64_t mLastObjectIDUsed;
int64_t mBlocksUsed;
+<<<<<<< HEAD
+=======
+ int64_t mBlocksInCurrentFiles;
+>>>>>>> 0.12
int64_t mBlocksInOldFiles;
int64_t mBlocksInDeletedFiles;
int64_t mBlocksInDirectories;
int64_t mBlocksSoftLimit;
int64_t mBlocksHardLimit;
+<<<<<<< HEAD
std::vector<int64_t> mDeletedDirectories;
};
+=======
+ int64_t mNumFiles;
+ int64_t mNumOldFiles;
+ int64_t mNumDeletedFiles;
+ int64_t mNumDirectories;
+ std::vector<int64_t> mDeletedDirectories;
+ bool mAccountEnabled;
+ CollectInBufferStream mExtraData;
+};
+
+>>>>>>> 0.12
#endif // BACKUPSTOREINFO__H
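
The two INFO_MAGIC values above distinguish the old fixed-struct layout (info_StreamFormat_1) from the newer Archive-based layout written by Save(). A standalone sketch of that first-four-bytes check, using plain stdio purely for illustration (the real code reads through RaidFileRead and throws on errors):

#include <arpa/inet.h>  // ntohl (POSIX; use winsock2.h on Windows)
#include <cstdint>
#include <cstdio>

// Sketch: classify a store info file by its leading magic value.
// Returns 1 for the old struct format, 2 for the Archive ("INF2")
// format, 0 for anything unrecognised, -1 on a short read.
int DetectInfoVersion(std::FILE* pFile)
{
	uint32_t magic = 0;
	if(std::fread(&magic, sizeof(magic), 1, pFile) != 1)
	{
		return -1;
	}
	switch(ntohl(magic))
	{
		case 0x34832476: return 1; // INFO_MAGIC_VALUE_1
		case 0x494e4632: return 2; // INFO_MAGIC_VALUE_2 ("INF2")
		default:         return 0;
	}
}
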
diff --git a/lib/backupstore/BackupStoreObjectMagic.h b/lib/backupstore/BackupStoreObjectMagic.h
new file mode 100644
index 00000000..7ee600a2
--- /dev/null
+++ b/lib/backupstore/BackupStoreObjectMagic.h
@@ -0,0 +1,31 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreObjectMagic.h
+// Purpose: Magic values for the start of objects in the backup store
+// Created: 19/11/03
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREOBJECTMAGIC__H
+#define BACKUPSTOREOBJECTMAGIC__H
+
+// Each of these values is the first 4 bytes of the object file.
+// Remember to swap from network to host byte order.
+
+// Magic value for file streams
+#define OBJECTMAGIC_FILE_MAGIC_VALUE_V1 0x66696C65
+// Do not use v0 in any new code!
+#define OBJECTMAGIC_FILE_MAGIC_VALUE_V0 0x46494C45
+
+// Magic for the block index at the file stream -- used to
+// ensure streams are reordered as expected
+#define OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V1 0x62696478
+// Do not use v0 in any new code!
+#define OBJECTMAGIC_FILE_BLOCKS_MAGIC_VALUE_V0 0x46426C6B
+
+// Magic value for directory streams
+#define OBJECTMAGIC_DIR_MAGIC_VALUE 0x4449525F
+
+#endif // BACKUPSTOREOBJECTMAGIC__H
+
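
A small aside that helps when inspecting store files in a hex dump: the v1 and directory magic numbers above are four ASCII bytes in network order. A throwaway sketch that prints them as text:

#include <cstdint>
#include <cstdio>

// Prints a magic value as the four ASCII bytes it encodes, most
// significant byte first (i.e. as the bytes appear on disc).
void PrintMagicAsAscii(uint32_t magic)
{
	std::printf("0x%08x = \"%c%c%c%c\"\n", (unsigned)magic,
		char((magic >> 24) & 0xff), char((magic >> 16) & 0xff),
		char((magic >> 8) & 0xff), char(magic & 0xff));
}

// PrintMagicAsAscii(0x66696C65) prints: 0x66696c65 = "file"
// PrintMagicAsAscii(0x62696478) prints: 0x62696478 = "bidx"
// PrintMagicAsAscii(0x4449525F) prints: 0x4449525f = "DIR_"
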
diff --git a/lib/backupstore/BackupStoreRefCountDatabase.cpp b/lib/backupstore/BackupStoreRefCountDatabase.cpp
index f6db2ca4..642e260c 100644
--- a/lib/backupstore/BackupStoreRefCountDatabase.cpp
+++ b/lib/backupstore/BackupStoreRefCountDatabase.cpp
@@ -61,7 +61,11 @@ std::string BackupStoreRefCountDatabase::GetFilename(const
ASSERT(RootDir[RootDir.size() - 1] == '/' ||
RootDir[RootDir.size() - 1] == DIRECTORY_SEPARATOR_ASCHAR);
+<<<<<<< HEAD
std::string fn(RootDir + "refcount.db");
+=======
+ std::string fn(RootDir + REFCOUNT_FILENAME ".db");
+>>>>>>> 0.12
RaidFileController &rcontroller(RaidFileController::GetController());
RaidFileDiscSet rdiscSet(rcontroller.GetDiscSet(rAccount.GetDiscSet()));
return RaidFileUtil::MakeWriteFileName(rdiscSet, fn);
@@ -91,9 +95,15 @@ void BackupStoreRefCountDatabase::Create(const
// Open the file for writing
if (FileExists(Filename) && !AllowOverwrite)
{
+<<<<<<< HEAD
BOX_ERROR("Attempted to overwrite refcount database file: " <<
Filename);
THROW_EXCEPTION(RaidFileException, CannotOverwriteExistingFile);
+=======
+ THROW_FILE_ERROR("Failed to overwrite refcount database: "
+ "not allowed here", Filename, RaidFileException,
+ CannotOverwriteExistingFile);
+>>>>>>> 0.12
}
int flags = O_CREAT | O_BINARY | O_RDWR;
@@ -134,14 +144,26 @@ std::auto_ptr<BackupStoreRefCountDatabase> BackupStoreRefCountDatabase::Load(
refcount_StreamFormat hdr;
if(!dbfile->ReadFullBuffer(&hdr, sizeof(hdr), 0 /* not interested in bytes read if this fails */))
{
+<<<<<<< HEAD
THROW_EXCEPTION(BackupStoreException, CouldNotLoadStoreInfo)
+=======
+ THROW_FILE_ERROR("Failed to read refcount database: "
+ "short read", filename, BackupStoreException,
+ CouldNotLoadStoreInfo);
+>>>>>>> 0.12
}
// Check it
if(ntohl(hdr.mMagicValue) != REFCOUNT_MAGIC_VALUE ||
(int32_t)ntohl(hdr.mAccountID) != rAccount.GetID())
{
+<<<<<<< HEAD
THROW_EXCEPTION(BackupStoreException, BadStoreInfoOnLoad)
+=======
+ THROW_FILE_ERROR("Failed to read refcount database: "
+ "bad magic number", filename, BackupStoreException,
+ BadStoreInfoOnLoad);
+>>>>>>> 0.12
}
// Make new object
@@ -158,6 +180,7 @@ std::auto_ptr<BackupStoreRefCountDatabase> BackupStoreRefCountDatabase::Load(
// --------------------------------------------------------------------------
//
// Function
+<<<<<<< HEAD
// Name: BackupStoreRefCountDatabase::Save()
// Purpose: Save modified info back to disc
// Created: 2003/08/28
@@ -241,6 +264,8 @@ void BackupStoreRefCountDatabase::Save()
// --------------------------------------------------------------------------
//
// Function
+=======
+>>>>>>> 0.12
// Name: BackupStoreRefCountDatabase::GetRefCount(int64_t
// ObjectID)
// Purpose: Get the number of references to the specified object
@@ -255,10 +280,17 @@ BackupStoreRefCountDatabase::GetRefCount(int64_t ObjectID) const
if (GetSize() < offset + GetEntrySize())
{
+<<<<<<< HEAD
BOX_ERROR("attempted read of unknown refcount for object " <<
BOX_FORMAT_OBJECTID(ObjectID));
THROW_EXCEPTION(BackupStoreException,
UnknownObjectRefCountRequested);
+=======
+ THROW_FILE_ERROR("Failed to read refcount database: "
+ "attempted read of unknown refcount for object " <<
+ BOX_FORMAT_OBJECTID(ObjectID), mFilename,
+ BackupStoreException, UnknownObjectRefCountRequested);
+>>>>>>> 0.12
}
mapDatabaseFile->Seek(offset, SEEK_SET);
@@ -267,9 +299,15 @@ BackupStoreRefCountDatabase::GetRefCount(int64_t ObjectID) const
if (mapDatabaseFile->Read(&refcount, sizeof(refcount)) !=
sizeof(refcount))
{
+<<<<<<< HEAD
BOX_LOG_SYS_ERROR("short read on refcount database: " <<
mFilename);
THROW_EXCEPTION(BackupStoreException, CouldNotLoadStoreInfo);
+=======
+ THROW_FILE_ERROR("Failed to read refcount database: "
+ "short read at offset " << offset, mFilename,
+ BackupStoreException, CouldNotLoadStoreInfo);
+>>>>>>> 0.12
}
return ntohl(refcount);
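
The GetRefCount() changes above keep the same on-disk layout: one big-endian uint32_t per object ID at a computed offset. A self-contained sketch of that fixed-size-record lookup (the header size is a placeholder argument here, not the real refcount_StreamFormat constant):

#include <arpa/inet.h>  // ntohl (POSIX)
#include <cstdint>
#include <cstdio>

// Sketch: seek to the record for objectID and read one
// network-byte-order counter. Returns false on seek/read failure.
bool ReadRefCount(std::FILE* pDatabase, long headerSize, int64_t objectID,
	uint32_t& rRefCountOut)
{
	const long entrySize = sizeof(uint32_t);
	long offset = headerSize + static_cast<long>(objectID) * entrySize;
	if(std::fseek(pDatabase, offset, SEEK_SET) != 0)
	{
		return false;
	}
	uint32_t raw = 0;
	if(std::fread(&raw, sizeof(raw), 1, pDatabase) != 1)
	{
		return false;  // short read
	}
	rRefCountOut = ntohl(raw);
	return true;
}
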
diff --git a/lib/backupstore/HousekeepStoreAccount.cpp b/lib/backupstore/HousekeepStoreAccount.cpp
new file mode 100644
index 00000000..75feda7f
--- /dev/null
+++ b/lib/backupstore/HousekeepStoreAccount.cpp
@@ -0,0 +1,1110 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: HousekeepStoreAccount.cpp
+//		Purpose: Perform housekeeping on a backup store account
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <stdio.h>
+
+#include <map>
+
+#include "autogen_BackupStoreException.h"
+#include "BackupConstants.h"
+#include "BackupStoreAccountDatabase.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreInfo.h"
+#include "BackupStoreRefCountDatabase.h"
+#include "BufferedStream.h"
+#include "HousekeepStoreAccount.h"
+#include "NamedLock.h"
+#include "RaidFileRead.h"
+#include "RaidFileWrite.h"
+#include "StoreStructure.h"
+
+#include "MemLeakFindOn.h"
+
+// check every 32 directories scanned/files deleted
+#define POLL_INTERPROCESS_MSG_CHECK_FREQUENCY 32
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::HousekeepStoreAccount(int, const std::string &, int, BackupStoreDaemon &)
+// Purpose: Constructor
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+HousekeepStoreAccount::HousekeepStoreAccount(int AccountID,
+ const std::string &rStoreRoot, int StoreDiscSet,
+ HousekeepingCallback* pHousekeepingCallback)
+ : mAccountID(AccountID),
+ mStoreRoot(rStoreRoot),
+ mStoreDiscSet(StoreDiscSet),
+ mpHousekeepingCallback(pHousekeepingCallback),
+ mDeletionSizeTarget(0),
+ mPotentialDeletionsTotalSize(0),
+ mMaxSizeInPotentialDeletions(0),
+ mBlocksUsed(0),
+ mBlocksInOldFiles(0),
+ mBlocksInDeletedFiles(0),
+ mBlocksInDirectories(0),
+ mBlocksUsedDelta(0),
+ mBlocksInOldFilesDelta(0),
+ mBlocksInDeletedFilesDelta(0),
+ mBlocksInDirectoriesDelta(0),
+ mFilesDeleted(0),
+ mEmptyDirectoriesDeleted(0),
+ mSuppressRefCountChangeWarnings(false),
+ mRefCountsAdjusted(0),
+ mCountUntilNextInterprocessMsgCheck(POLL_INTERPROCESS_MSG_CHECK_FREQUENCY)
+{
+ std::ostringstream tag;
+ tag << "hk=" << BOX_FORMAT_ACCOUNT(mAccountID);
+ mTagWithClientID.Change(tag.str());
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::~HousekeepStoreAccount()
+// Purpose: Destructor
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+HousekeepStoreAccount::~HousekeepStoreAccount()
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::DoHousekeeping()
+// Purpose: Perform the housekeeping
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever)
+{
+ BOX_TRACE("Starting housekeeping on account " <<
+ BOX_FORMAT_ACCOUNT(mAccountID));
+
+ // Attempt to lock the account
+ std::string writeLockFilename;
+ StoreStructure::MakeWriteLockFilename(mStoreRoot, mStoreDiscSet,
+ writeLockFilename);
+ NamedLock writeLock;
+ if(!writeLock.TryAndGetLock(writeLockFilename.c_str(),
+ 0600 /* restrictive file permissions */))
+ {
+ if(KeepTryingForever)
+ {
+ BOX_WARNING("Failed to lock account for housekeeping, "
+ "still trying...");
+ while(!writeLock.TryAndGetLock(writeLockFilename,
+ 0600 /* restrictive file permissions */))
+ {
+ sleep(1);
+ }
+ }
+ else
+ {
+ // Couldn't lock the account -- just stop now
+ return false;
+ }
+ }
+
+ // Load the store info to find necessary info for the housekeeping
+ std::auto_ptr<BackupStoreInfo> info(BackupStoreInfo::Load(mAccountID,
+ mStoreRoot, mStoreDiscSet, false /* Read/Write */));
+ std::auto_ptr<BackupStoreInfo> pOldInfo(
+ BackupStoreInfo::Load(mAccountID, mStoreRoot, mStoreDiscSet,
+ true /* Read Only */));
+
+ // If the account has a name, change the logging tag to include it
+ if(!(info->GetAccountName().empty()))
+ {
+ std::ostringstream tag;
+ tag << "hk=" << BOX_FORMAT_ACCOUNT(mAccountID) << "/" <<
+ info->GetAccountName();
+ mTagWithClientID.Change(tag.str());
+ }
+
+ // Calculate how much should be deleted
+ mDeletionSizeTarget = info->GetBlocksUsed() - info->GetBlocksSoftLimit();
+ if(mDeletionSizeTarget < 0)
+ {
+ mDeletionSizeTarget = 0;
+ }
+
+ // initialise the refcount database
+ mNewRefCounts.clear();
+ // try to pre-allocate as much memory as we need
+ mNewRefCounts.reserve(info->GetLastObjectIDUsed());
+ // initialise the refcount of the root entry
+ mNewRefCounts.resize(BACKUPSTORE_ROOT_DIRECTORY_ID + 1, 0);
+ mNewRefCounts[BACKUPSTORE_ROOT_DIRECTORY_ID] = 1;
+
+ // Scan the directory for potential things to delete
+ // This will also remove eligible items marked with RemoveASAP
+ bool continueHousekeeping = ScanDirectory(BACKUPSTORE_ROOT_DIRECTORY_ID);
+
+	// If the directory scan stopped for some reason (probably because
+	// the parent instructed us to terminate), stop now.
+ if(!continueHousekeeping)
+ {
+ // If any files were marked "delete now", then update
+ // the size of the store.
+ if(mBlocksUsedDelta != 0 ||
+ mBlocksInOldFilesDelta != 0 ||
+ mBlocksInDeletedFilesDelta != 0)
+ {
+ info->ChangeBlocksUsed(mBlocksUsedDelta);
+ info->ChangeBlocksInOldFiles(mBlocksInOldFilesDelta);
+ info->ChangeBlocksInDeletedFiles(mBlocksInDeletedFilesDelta);
+
+ // Save the store info back
+ info->ReportChangesTo(*pOldInfo);
+ info->Save();
+ }
+
+ return false;
+ }
+
+ // Log any difference in opinion between the values recorded in
+ // the store info, and the values just calculated for space usage.
+ // BLOCK
+ {
+ int64_t used = info->GetBlocksUsed();
+ int64_t usedOld = info->GetBlocksInOldFiles();
+ int64_t usedDeleted = info->GetBlocksInDeletedFiles();
+ int64_t usedDirectories = info->GetBlocksInDirectories();
+
+ // If the counts were wrong, taking into account RemoveASAP
+ // items deleted, log a message
+ if((used + mBlocksUsedDelta) != mBlocksUsed
+ || (usedOld + mBlocksInOldFilesDelta) != mBlocksInOldFiles
+ || (usedDeleted + mBlocksInDeletedFilesDelta) != mBlocksInDeletedFiles
+ || usedDirectories != mBlocksInDirectories)
+ {
+ // Log this
+ BOX_ERROR("Housekeeping on account " <<
+ BOX_FORMAT_ACCOUNT(mAccountID) << " found "
+ "and fixed wrong block counts: "
+ "used (" <<
+ (used + mBlocksUsedDelta) << "," <<
+ mBlocksUsed << "), old (" <<
+ (usedOld + mBlocksInOldFilesDelta) << "," <<
+ mBlocksInOldFiles << "), deleted (" <<
+ (usedDeleted + mBlocksInDeletedFilesDelta) <<
+ "," << mBlocksInDeletedFiles << "), dirs (" <<
+ usedDirectories << "," << mBlocksInDirectories
+ << ")");
+ }
+
+ // If the current values don't match, store them
+ if(used != mBlocksUsed
+ || usedOld != mBlocksInOldFiles
+ || usedDeleted != mBlocksInDeletedFiles
+ || usedDirectories != (mBlocksInDirectories + mBlocksInDirectoriesDelta))
+ {
+ // Set corrected values in store info
+ info->CorrectAllUsedValues(mBlocksUsed,
+ mBlocksInOldFiles, mBlocksInDeletedFiles,
+ mBlocksInDirectories + mBlocksInDirectoriesDelta);
+
+ info->ReportChangesTo(*pOldInfo);
+ info->Save();
+ }
+ }
+
+ // Reset the delta counts for files, as they will include
+ // RemoveASAP flagged files deleted during the initial scan.
+
+ // keep for reporting
+ int64_t removeASAPBlocksUsedDelta = mBlocksUsedDelta;
+
+ mBlocksUsedDelta = 0;
+ mBlocksInOldFilesDelta = 0;
+ mBlocksInDeletedFilesDelta = 0;
+
+ // Go and delete items from the accounts
+ bool deleteInterrupted = DeleteFiles();
+
+ // If that wasn't interrupted, remove any empty directories which
+ // are also marked as deleted in their containing directory
+ if(!deleteInterrupted)
+ {
+ deleteInterrupted = DeleteEmptyDirectories();
+ }
+
+ // Log deletion if anything was deleted
+ if(mFilesDeleted > 0 || mEmptyDirectoriesDeleted > 0)
+ {
+ BOX_INFO("Housekeeping on account " <<
+ BOX_FORMAT_ACCOUNT(mAccountID) << " "
+ "removed " <<
+ (0 - (mBlocksUsedDelta + removeASAPBlocksUsedDelta)) <<
+ " blocks (" << mFilesDeleted << " files, " <<
+ mEmptyDirectoriesDeleted << " dirs)" <<
+ (deleteInterrupted?" and was interrupted":""));
+ }
+
+ // We can only update the refcount database if we successfully
+ // finished our scan of all directories, otherwise we don't actually
+ // know which of the new counts are valid and which aren't
+ // (we might not have seen second references to some objects, etc.)
+
+ BackupStoreAccountDatabase::Entry account(mAccountID, mStoreDiscSet);
+ std::auto_ptr<BackupStoreRefCountDatabase> apReferences;
+
+ // try to load the reference count database
+ try
+ {
+ apReferences = BackupStoreRefCountDatabase::Load(account,
+ false);
+ }
+ catch(BoxException &e)
+ {
+ BOX_WARNING("Reference count database is missing or corrupted "
+ "during housekeeping, creating a new one.");
+ mSuppressRefCountChangeWarnings = true;
+ BackupStoreRefCountDatabase::CreateForRegeneration(account);
+ apReferences = BackupStoreRefCountDatabase::Load(account,
+ false);
+ }
+
+ int64_t LastUsedObjectIdOnDisk = apReferences->GetLastObjectIDUsed();
+
+ for (int64_t ObjectID = BACKUPSTORE_ROOT_DIRECTORY_ID;
+ ObjectID < mNewRefCounts.size(); ObjectID++)
+ {
+ if (ObjectID > LastUsedObjectIdOnDisk)
+ {
+ if (!mSuppressRefCountChangeWarnings)
+ {
+ BOX_WARNING("Reference count of object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ " not found in database, added"
+ " with " << mNewRefCounts[ObjectID] <<
+ " references");
+ }
+ apReferences->SetRefCount(ObjectID,
+ mNewRefCounts[ObjectID]);
+ mRefCountsAdjusted++;
+ LastUsedObjectIdOnDisk = ObjectID;
+ continue;
+ }
+
+ BackupStoreRefCountDatabase::refcount_t OldRefCount =
+ apReferences->GetRefCount(ObjectID);
+
+ if (OldRefCount != mNewRefCounts[ObjectID])
+ {
+ BOX_WARNING("Reference count of object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ " changed from " << OldRefCount <<
+ " to " << mNewRefCounts[ObjectID]);
+ apReferences->SetRefCount(ObjectID,
+ mNewRefCounts[ObjectID]);
+ mRefCountsAdjusted++;
+ }
+ }
+
+ // zero excess references in the database
+ for (int64_t ObjectID = mNewRefCounts.size();
+ ObjectID <= LastUsedObjectIdOnDisk; ObjectID++)
+ {
+ BackupStoreRefCountDatabase::refcount_t OldRefCount =
+ apReferences->GetRefCount(ObjectID);
+ BackupStoreRefCountDatabase::refcount_t NewRefCount = 0;
+
+ if (OldRefCount != NewRefCount)
+ {
+ BOX_WARNING("Reference count of object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ " changed from " << OldRefCount <<
+ " to " << NewRefCount << " (not found)");
+ apReferences->SetRefCount(ObjectID, NewRefCount);
+ mRefCountsAdjusted++;
+ }
+ }
+
+ // force file to be saved and closed before releasing the lock below
+ apReferences.reset();
+
+	// Make sure the deltas won't cause problems if the counts are
+	// really wrong, and they weren't fixed because the store was
+	// updated during the scan.
+ if(mBlocksUsedDelta < (0 - info->GetBlocksUsed()))
+ {
+ mBlocksUsedDelta = (0 - info->GetBlocksUsed());
+ }
+ if(mBlocksInOldFilesDelta < (0 - info->GetBlocksInOldFiles()))
+ {
+ mBlocksInOldFilesDelta = (0 - info->GetBlocksInOldFiles());
+ }
+ if(mBlocksInDeletedFilesDelta < (0 - info->GetBlocksInDeletedFiles()))
+ {
+ mBlocksInDeletedFilesDelta = (0 - info->GetBlocksInDeletedFiles());
+ }
+ if(mBlocksInDirectoriesDelta < (0 - info->GetBlocksInDirectories()))
+ {
+ mBlocksInDirectoriesDelta = (0 - info->GetBlocksInDirectories());
+ }
+
+ // Update the usage counts in the store
+ info->ChangeBlocksUsed(mBlocksUsedDelta);
+ info->ChangeBlocksInOldFiles(mBlocksInOldFilesDelta);
+ info->ChangeBlocksInDeletedFiles(mBlocksInDeletedFilesDelta);
+ info->ChangeBlocksInDirectories(mBlocksInDirectoriesDelta);
+
+ // Save the store info back
+ info->ReportChangesTo(*pOldInfo);
+ info->Save();
+
+	// Explicitly release the lock (would happen automatically on
+ // going out of scope, included for code clarity)
+ writeLock.ReleaseLock();
+
+ BOX_TRACE("Finished housekeeping on account " <<
+ BOX_FORMAT_ACCOUNT(mAccountID));
+ return true;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::MakeObjectFilename(int64_t, std::string &)
+// Purpose: Generate and place the filename for a given object ID
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+void HousekeepStoreAccount::MakeObjectFilename(int64_t ObjectID, std::string &rFilenameOut)
+{
+ // Delegate to utility function
+ StoreStructure::MakeObjectFilename(ObjectID, mStoreRoot, mStoreDiscSet, rFilenameOut, false /* don't bother ensuring the directory exists */);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::ScanDirectory(int64_t)
+//		Purpose: Private. Scan a directory for potentially deletable
+// items, and add them to the list. Returns true if the
+// scan should continue.
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID)
+{
+#ifndef WIN32
+ if((--mCountUntilNextInterprocessMsgCheck) <= 0)
+ {
+ mCountUntilNextInterprocessMsgCheck =
+ POLL_INTERPROCESS_MSG_CHECK_FREQUENCY;
+
+ // Check for having to stop
+ // Include account ID here as the specified account is locked
+ if(mpHousekeepingCallback && mpHousekeepingCallback->CheckForInterProcessMsg(mAccountID))
+ {
+ // Need to abort now
+ return false;
+ }
+ }
+#endif
+
+ // Get the filename
+ std::string objectFilename;
+ MakeObjectFilename(ObjectID, objectFilename);
+
+ // Open it.
+ std::auto_ptr<RaidFileRead> dirStream(RaidFileRead::Open(mStoreDiscSet,
+ objectFilename));
+
+ // Add the size of the directory on disc to the size being calculated
+ int64_t originalDirSizeInBlocks = dirStream->GetDiscUsageInBlocks();
+ mBlocksInDirectories += originalDirSizeInBlocks;
+ mBlocksUsed += originalDirSizeInBlocks;
+
+ // Read the directory in
+ BackupStoreDirectory dir;
+ BufferedStream buf(*dirStream);
+ dir.ReadFromStream(buf, IOStream::TimeOutInfinite);
+ dirStream->Close();
+
+ // Is it empty?
+ if(dir.GetNumberOfEntries() == 0)
+ {
+ // Add it to the list of directories to potentially delete
+ mEmptyDirectories.push_back(dir.GetObjectID());
+ }
+
+ // Calculate reference counts first, before we start requesting
+ // files to be deleted.
+ // BLOCK
+ {
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+
+ while((en = i.Next()) != 0)
+ {
+ // This directory references this object
+ if (mNewRefCounts.size() <= en->GetObjectID())
+ {
+ mNewRefCounts.resize(en->GetObjectID() + 1, 0);
+ }
+ mNewRefCounts[en->GetObjectID()]++;
+ }
+ }
+
+ // BLOCK
+ {
+ // Remove any files which are marked for removal as soon
+ // as they become old or deleted.
+ bool deletedSomething = false;
+ do
+ {
+ // Iterate through the directory
+ deletedSomething = false;
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
+ {
+ int16_t enFlags = en->GetFlags();
+ if((enFlags & BackupStoreDirectory::Entry::Flags_RemoveASAP) != 0
+ && (enFlags & (BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)) != 0)
+ {
+ // Delete this immediately.
+ DeleteFile(ObjectID, en->GetObjectID(), dir, objectFilename, originalDirSizeInBlocks);
+
+ // flag as having done something
+ deletedSomething = true;
+
+ // Must start the loop from the beginning again, as iterator is now
+ // probably invalid.
+ break;
+ }
+ }
+ } while(deletedSomething);
+ }
+
+ // BLOCK
+ {
+ // Add files to the list of potential deletions
+
+ // map to count the distance from the mark
+ typedef std::pair<std::string, int32_t> version_t;
+ std::map<version_t, int32_t> markVersionAges;
+ // map of pair (filename, mark number) -> version age
+
+ // NOTE: use a reverse iterator to allow the distance from mark stuff to work
+ BackupStoreDirectory::ReverseIterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
+ {
+ // Update recalculated usage sizes
+ int16_t enFlags = en->GetFlags();
+ int64_t enSizeInBlocks = en->GetSizeInBlocks();
+ mBlocksUsed += enSizeInBlocks;
+ if(enFlags & BackupStoreDirectory::Entry::Flags_OldVersion) mBlocksInOldFiles += enSizeInBlocks;
+ if(enFlags & BackupStoreDirectory::Entry::Flags_Deleted) mBlocksInDeletedFiles += enSizeInBlocks;
+
+ // Work out ages of this version from the last mark
+ int32_t enVersionAge = 0;
+ std::map<version_t, int32_t>::iterator enVersionAgeI(
+ markVersionAges.find(
+ version_t(en->GetName().GetEncodedFilename(),
+ en->GetMarkNumber())));
+ if(enVersionAgeI != markVersionAges.end())
+ {
+ enVersionAge = enVersionAgeI->second + 1;
+ enVersionAgeI->second = enVersionAge;
+ }
+ else
+ {
+ markVersionAges[version_t(en->GetName().GetEncodedFilename(), en->GetMarkNumber())] = enVersionAge;
+ }
+ // enVersionAge is now the age of this version.
+
+			// Potentially add it to the list if it's an old version or deleted
+ if((enFlags & (BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)) != 0)
+ {
+ // Is deleted / old version.
+ DelEn d;
+ d.mObjectID = en->GetObjectID();
+ d.mInDirectory = ObjectID;
+ d.mSizeInBlocks = en->GetSizeInBlocks();
+ d.mMarkNumber = en->GetMarkNumber();
+ d.mVersionAgeWithinMark = enVersionAge;
+ d.mIsFlagDeleted = (enFlags &
+ BackupStoreDirectory::Entry::Flags_Deleted)
+ ? true : false;
+
+ // Add it to the list
+ mPotentialDeletions.insert(d);
+
+ // Update various counts
+ mPotentialDeletionsTotalSize += d.mSizeInBlocks;
+ if(d.mSizeInBlocks > mMaxSizeInPotentialDeletions) mMaxSizeInPotentialDeletions = d.mSizeInBlocks;
+
+ // Too much in the list of potential deletions?
+ // (check against the deletion target + the max size in deletions, so that we never delete things
+ // and take the total size below the deletion size target)
+ if(mPotentialDeletionsTotalSize > (mDeletionSizeTarget + mMaxSizeInPotentialDeletions))
+ {
+ int64_t sizeToRemove = mPotentialDeletionsTotalSize - (mDeletionSizeTarget + mMaxSizeInPotentialDeletions);
+ bool recalcMaxSize = false;
+
+ while(sizeToRemove > 0)
+ {
+ // Make iterator for the last element, while checking that there's something there in the first place.
+ std::set<DelEn, DelEnCompare>::iterator i(mPotentialDeletions.end());
+						if(i == mPotentialDeletions.begin())
+ {
+ // Nothing left in set
+ break;
+ }
+ // Make this into an iterator pointing to the last element in the set
+ --i;
+
+ // Delete this one?
+ if(sizeToRemove > i->mSizeInBlocks)
+ {
+ sizeToRemove -= i->mSizeInBlocks;
+ if(i->mSizeInBlocks >= mMaxSizeInPotentialDeletions)
+ {
+ // Will need to recalculate the maximum size now, because we've just deleted that element
+ recalcMaxSize = true;
+ }
+ mPotentialDeletions.erase(i);
+ }
+ else
+ {
+ // Over the size to remove, so stop now
+ break;
+ }
+ }
+
+ if(recalcMaxSize)
+ {
+ // Because an object which was the maximum size recorded was deleted from the set
+ // it's necessary to recalculate this maximum.
+ mMaxSizeInPotentialDeletions = 0;
+ std::set<DelEn, DelEnCompare>::const_iterator i(mPotentialDeletions.begin());
+ for(; i != mPotentialDeletions.end(); ++i)
+ {
+ if(i->mSizeInBlocks > mMaxSizeInPotentialDeletions)
+ {
+ mMaxSizeInPotentialDeletions = i->mSizeInBlocks;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ {
+ // Recurse into subdirectories
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir)) != 0)
+ {
+ // Next level
+ ASSERT((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == BackupStoreDirectory::Entry::Flags_Dir);
+
+ if(!ScanDirectory(en->GetObjectID()))
+ {
+ // Halting operation
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name: HousekeepStoreAccount::DelEnCompare::operator()(const HousekeepStoreAccount::DelEn &, const HousekeepStoreAccount::DelEn &)
+// Purpose: Comparison function for set
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::DelEnCompare::operator()(const HousekeepStoreAccount::DelEn &x, const HousekeepStoreAccount::DelEn &y)
+{
+ // STL spec says this:
+ // A Strict Weak Ordering is a Binary Predicate that compares two objects, returning true if the first precedes the second.
+
+ // The sort order here is intended to preserve the entries of most value, that is, the newest objects
+ // which are on a mark boundary.
+
+ // Reverse order age, so oldest goes first
+ if(x.mVersionAgeWithinMark > y.mVersionAgeWithinMark)
+ {
+ return true;
+ }
+ else if(x.mVersionAgeWithinMark < y.mVersionAgeWithinMark)
+ {
+ return false;
+ }
+
+ // but mark number in ascending order, so that the oldest marks are deleted first
+ if(x.mMarkNumber < y.mMarkNumber)
+ {
+ return true;
+ }
+ else if(x.mMarkNumber > y.mMarkNumber)
+ {
+ return false;
+ }
+
+ // Just compare object ID now to put the oldest objects first
+ return x.mObjectID < y.mObjectID;
+}
+
+
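
The comparison above orders candidates so that the oldest version within a mark comes first, then the oldest mark, then the lowest object ID. The same strict weak ordering expressed over a simplified struct with std::tie (a sketch; the field names are illustrative, not the project's):

#include <cstdint>
#include <tuple>

struct Candidate
{
	int32_t versionAge;  // age within the mark, larger = older
	int32_t markNumber;
	int64_t objectID;
};

// Equivalent to DelEnCompare::operator(): larger version age first,
// then smaller mark number, then smaller object ID.
bool Precedes(const Candidate& x, const Candidate& y)
{
	return std::tie(y.versionAge, x.markNumber, x.objectID) <
		std::tie(x.versionAge, y.markNumber, y.objectID);
}
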
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::DeleteFiles()
+// Purpose: Delete the files targeted for deletion, returning
+// true if the operation was interrupted
+// Created: 15/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::DeleteFiles()
+{
+ // Only delete files if the deletion target is greater than zero
+ // (otherwise we delete one file each time round, which gradually deletes the old versions)
+ if(mDeletionSizeTarget <= 0)
+ {
+ // Not interrupted
+ return false;
+ }
+
+ // Iterate through the set of potential deletions, until enough has been deleted.
+ // (there is likely to be more in the set than should be actually deleted).
+ for(std::set<DelEn, DelEnCompare>::iterator i(mPotentialDeletions.begin()); i != mPotentialDeletions.end(); ++i)
+ {
+#ifndef WIN32
+ if((--mCountUntilNextInterprocessMsgCheck) <= 0)
+ {
+ mCountUntilNextInterprocessMsgCheck = POLL_INTERPROCESS_MSG_CHECK_FREQUENCY;
+ // Check for having to stop
+ if(mpHousekeepingCallback && mpHousekeepingCallback->CheckForInterProcessMsg(mAccountID)) // include account ID here as the specified account is now locked
+ {
+ // Need to abort now
+ return true;
+ }
+ }
+#endif
+
+ // Load up the directory it's in
+ // Get the filename
+ std::string dirFilename;
+ BackupStoreDirectory dir;
+ int64_t dirSizeInBlocksOrig = 0;
+ {
+ MakeObjectFilename(i->mInDirectory, dirFilename);
+ std::auto_ptr<RaidFileRead> dirStream(RaidFileRead::Open(mStoreDiscSet, dirFilename));
+ dirSizeInBlocksOrig = dirStream->GetDiscUsageInBlocks();
+ dir.ReadFromStream(*dirStream, IOStream::TimeOutInfinite);
+ }
+
+ // Delete the file
+ DeleteFile(i->mInDirectory, i->mObjectID, dir, dirFilename, dirSizeInBlocksOrig);
+ BOX_INFO("Housekeeping removed " <<
+ (i->mIsFlagDeleted ? "deleted" : "old") <<
+ " file " << BOX_FORMAT_OBJECTID(i->mObjectID) <<
+ " from dir " << BOX_FORMAT_OBJECTID(i->mInDirectory));
+
+ // Stop if the deletion target has been matched or exceeded
+ // (checking here rather than at the beginning will tend to reduce the
+ // space to slightly less than the soft limit, which will allow the backup
+ // client to start uploading files again)
+ if((0 - mBlocksUsedDelta) >= mDeletionSizeTarget)
+ {
+ break;
+ }
+ }
+
+ return false;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::DeleteFile(int64_t, int64_t,
+// BackupStoreDirectory &, const std::string &, int64_t)
+// Purpose: Delete a file. Takes the directory already loaded
+// in and the filename, for efficiency in both the
+// usage scenarios.
+// Created: 15/7/04
+//
+// --------------------------------------------------------------------------
+void HousekeepStoreAccount::DeleteFile(int64_t InDirectory, int64_t ObjectID, BackupStoreDirectory &rDirectory, const std::string &rDirectoryFilename, int64_t OriginalDirSizeInBlocks)
+{
+ // Find the entry inside the directory
+ bool wasDeleted = false;
+ bool wasOldVersion = false;
+ int64_t deletedFileSizeInBlocks = 0;
+ // A pointer to an object which requires committing if the directory save goes OK
+ std::auto_ptr<RaidFileWrite> padjustedEntry;
+ // BLOCK
+ {
+ BackupStoreDirectory::Entry *pentry = rDirectory.FindEntryByID(ObjectID);
+ if(pentry == 0)
+ {
+ BOX_ERROR("Housekeeping on account " <<
+ BOX_FORMAT_ACCOUNT(mAccountID) << " "
+ "found error: object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) << " "
+ "not found in dir " <<
+ BOX_FORMAT_OBJECTID(InDirectory) << ", "
+ "indicates logic error/corruption? Run "
+ "bbstoreaccounts check <accid> fix");
+ return;
+ }
+
+ // Record the flags it's got set
+ wasDeleted = ((pentry->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) != 0);
+ wasOldVersion = ((pentry->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) != 0);
+ // Check this should be deleted
+ if(!wasDeleted && !wasOldVersion)
+ {
+			// Things changed since we were last around
+ return;
+ }
+
+ // Record size
+ deletedFileSizeInBlocks = pentry->GetSizeInBlocks();
+
+ // If the entry is involved in a chain of patches, it needs to be handled
+ // a bit more carefully.
+ if(pentry->GetDependsNewer() != 0 && pentry->GetDependsOlder() == 0)
+ {
+ // This entry is a patch from a newer entry. Just need to update the info on that entry.
+ BackupStoreDirectory::Entry *pnewer = rDirectory.FindEntryByID(pentry->GetDependsNewer());
+ if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID)
+ {
+ THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
+ }
+ // Change the info in the newer entry so that this no longer points to this entry
+ pnewer->SetDependsOlder(0);
+ }
+ else if(pentry->GetDependsOlder() != 0)
+ {
+ BackupStoreDirectory::Entry *polder = rDirectory.FindEntryByID(pentry->GetDependsOlder());
+ if(pentry->GetDependsNewer() == 0)
+ {
+ // There exists an older version which depends on this one. Need to combine the two over that one.
+
+ // Adjust the other entry in the directory
+ if(polder == 0 || polder->GetDependsNewer() != ObjectID)
+ {
+ THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
+ }
+ // Change the info in the older entry so that this no longer points to this entry
+ polder->SetDependsNewer(0);
+ }
+ else
+ {
+ // This entry is in the middle of a chain, and two patches need combining.
+
+ // First, adjust the directory entries
+ BackupStoreDirectory::Entry *pnewer = rDirectory.FindEntryByID(pentry->GetDependsNewer());
+ if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID
+ || polder == 0 || polder->GetDependsNewer() != ObjectID)
+ {
+ THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
+ }
+ // Remove the middle entry from the linked list by simply using the values from this entry
+ pnewer->SetDependsOlder(pentry->GetDependsOlder());
+ polder->SetDependsNewer(pentry->GetDependsNewer());
+ }
+
+ // COMMON CODE to both cases
+
+ // Generate the filename of the older version
+ std::string objFilenameOlder;
+ MakeObjectFilename(pentry->GetDependsOlder(), objFilenameOlder);
+ // Open it twice (it's the diff)
+ std::auto_ptr<RaidFileRead> pdiff(RaidFileRead::Open(mStoreDiscSet, objFilenameOlder));
+ std::auto_ptr<RaidFileRead> pdiff2(RaidFileRead::Open(mStoreDiscSet, objFilenameOlder));
+ // Open this file
+ std::string objFilename;
+ MakeObjectFilename(ObjectID, objFilename);
+ std::auto_ptr<RaidFileRead> pobjectBeingDeleted(RaidFileRead::Open(mStoreDiscSet, objFilename));
+ // And open a write file to overwrite the other directory entry
+ padjustedEntry.reset(new RaidFileWrite(mStoreDiscSet,
+ objFilenameOlder, mNewRefCounts[ObjectID]));
+ padjustedEntry->Open(true /* allow overwriting */);
+
+ if(pentry->GetDependsNewer() == 0)
+ {
+ // There exists an older version which depends on this one. Need to combine the two over that one.
+ BackupStoreFile::CombineFile(*pdiff, *pdiff2, *pobjectBeingDeleted, *padjustedEntry);
+ }
+ else
+ {
+ // This entry is in the middle of a chain, and two patches need combining.
+ BackupStoreFile::CombineDiffs(*pobjectBeingDeleted, *pdiff, *pdiff2, *padjustedEntry);
+ }
+			// The file will be committed later when the directory is safely committed.
+
+ // Work out the adjusted size
+ int64_t newSize = padjustedEntry->GetDiscUsageInBlocks();
+ int64_t sizeDelta = newSize - polder->GetSizeInBlocks();
+ mBlocksUsedDelta += sizeDelta;
+ if((polder->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) != 0)
+ {
+ mBlocksInDeletedFilesDelta += sizeDelta;
+ }
+ if((polder->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) != 0)
+ {
+ mBlocksInOldFilesDelta += sizeDelta;
+ }
+ polder->SetSizeInBlocks(newSize);
+ }
+
+ // pentry no longer valid
+ }
+
+ // Delete it from the directory
+ rDirectory.DeleteEntry(ObjectID);
+
+ // Save directory back to disc
+ // BLOCK
+ int64_t dirRevisedSize = 0;
+ {
+ RaidFileWrite writeDir(mStoreDiscSet, rDirectoryFilename,
+ mNewRefCounts[InDirectory]);
+ writeDir.Open(true /* allow overwriting */);
+ rDirectory.WriteToStream(writeDir);
+
+		// get the disc usage (must do this before committing it)
+ dirRevisedSize = writeDir.GetDiscUsageInBlocks();
+
+ // Commit directory
+ writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+ // adjust usage counts for this directory
+ if(dirRevisedSize > 0)
+ {
+ int64_t adjust = dirRevisedSize - OriginalDirSizeInBlocks;
+ mBlocksUsedDelta += adjust;
+ mBlocksInDirectoriesDelta += adjust;
+ }
+ }
+
+ // Commit any new adjusted entry
+ if(padjustedEntry.get() != 0)
+ {
+ padjustedEntry->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+ padjustedEntry.reset(); // delete it now
+ }
+
+ // Drop reference count by one. If it reaches zero, delete the file.
+ if(--mNewRefCounts[ObjectID] == 0)
+ {
+ // Delete from disc
+ BOX_TRACE("Removing unreferenced object " <<
+ BOX_FORMAT_OBJECTID(ObjectID));
+ std::string objFilename;
+ MakeObjectFilename(ObjectID, objFilename);
+ RaidFileWrite del(mStoreDiscSet, objFilename,
+ mNewRefCounts[ObjectID]);
+ del.Delete();
+ }
+ else
+ {
+ BOX_TRACE("Preserving object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) << " with " <<
+ mNewRefCounts[ObjectID] << " references");
+ }
+
+ // Adjust counts for the file
+ ++mFilesDeleted;
+ mBlocksUsedDelta -= deletedFileSizeInBlocks;
+ if(wasDeleted) mBlocksInDeletedFilesDelta -= deletedFileSizeInBlocks;
+ if(wasOldVersion) mBlocksInOldFilesDelta -= deletedFileSizeInBlocks;
+
+ // Delete the directory?
+	// Do this if... dir has zero entries, and is marked as deleted in its containing directory
+ if(rDirectory.GetNumberOfEntries() == 0)
+ {
+ // Candidate for deletion
+ mEmptyDirectories.push_back(InDirectory);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::DeleteEmptyDirectories()
+// Purpose: Remove any empty directories which are also marked as deleted in their containing directory,
+//		returning true if the operation was interrupted
+// Created: 15/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::DeleteEmptyDirectories()
+{
+ while(mEmptyDirectories.size() > 0)
+ {
+ std::vector<int64_t> toExamine;
+
+ // Go through list
+ for(std::vector<int64_t>::const_iterator i(mEmptyDirectories.begin()); i != mEmptyDirectories.end(); ++i)
+ {
+#ifndef WIN32
+ if((--mCountUntilNextInterprocessMsgCheck) <= 0)
+ {
+ mCountUntilNextInterprocessMsgCheck = POLL_INTERPROCESS_MSG_CHECK_FREQUENCY;
+ // Check for having to stop
+ if(mpHousekeepingCallback && mpHousekeepingCallback->CheckForInterProcessMsg(mAccountID)) // include account ID here as the specified account is now locked
+ {
+ // Need to abort now
+ return true;
+ }
+ }
+#endif
+
+ // Do not delete the root directory
+ if(*i == BACKUPSTORE_ROOT_DIRECTORY_ID)
+ {
+ continue;
+ }
+
+ DeleteEmptyDirectory(*i, toExamine);
+ }
+
+ // Remove contents of empty directories
+ mEmptyDirectories.clear();
+ // Swap in new, so it's examined next time round
+ mEmptyDirectories.swap(toExamine);
+ }
+
+ // Not interrupted
+ return false;
+}
+
+void HousekeepStoreAccount::DeleteEmptyDirectory(int64_t dirId,
+ std::vector<int64_t>& rToExamine)
+{
+ // Load up the directory to potentially delete
+ std::string dirFilename;
+ BackupStoreDirectory dir;
+ int64_t dirSizeInBlocks = 0;
+
+ // BLOCK
+ {
+ MakeObjectFilename(dirId, dirFilename);
+ // Check it actually exists (just in case it gets
+ // added twice to the list)
+ if(!RaidFileRead::FileExists(mStoreDiscSet, dirFilename))
+ {
+ // doesn't exist, next!
+ return;
+ }
+ // load
+ std::auto_ptr<RaidFileRead> dirStream(
+ RaidFileRead::Open(mStoreDiscSet, dirFilename));
+ dirSizeInBlocks = dirStream->GetDiscUsageInBlocks();
+ dir.ReadFromStream(*dirStream, IOStream::TimeOutInfinite);
+ }
+
+ // Make sure this directory is actually empty
+ if(dir.GetNumberOfEntries() != 0)
+ {
+ // Not actually empty, try next one
+ return;
+ }
+
+ // Candidate for deletion... open containing directory
+ std::string containingDirFilename;
+ BackupStoreDirectory containingDir;
+ int64_t containingDirSizeInBlocksOrig = 0;
+ {
+ MakeObjectFilename(dir.GetContainerID(), containingDirFilename);
+ std::auto_ptr<RaidFileRead> containingDirStream(
+ RaidFileRead::Open(mStoreDiscSet,
+ containingDirFilename));
+ containingDirSizeInBlocksOrig =
+ containingDirStream->GetDiscUsageInBlocks();
+ containingDir.ReadFromStream(*containingDirStream,
+ IOStream::TimeOutInfinite);
+ }
+
+ // Find the entry
+ BackupStoreDirectory::Entry *pdirentry =
+ containingDir.FindEntryByID(dir.GetObjectID());
+ if((pdirentry != 0) && ((pdirentry->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) != 0))
+ {
+ // Should be deleted
+ containingDir.DeleteEntry(dir.GetObjectID());
+
+ // Is the containing dir now a candidate for deletion?
+ if(containingDir.GetNumberOfEntries() == 0)
+ {
+ rToExamine.push_back(containingDir.GetObjectID());
+ }
+
+ // Write revised parent directory
+ RaidFileWrite writeDir(mStoreDiscSet, containingDirFilename,
+ mNewRefCounts[containingDir.GetObjectID()]);
+ writeDir.Open(true /* allow overwriting */);
+ containingDir.WriteToStream(writeDir);
+
+		// get the disc usage (must do this before committing it)
+ int64_t dirSize = writeDir.GetDiscUsageInBlocks();
+
+ // Commit directory
+ writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+ // adjust usage counts for this directory
+ if(dirSize > 0)
+ {
+ int64_t adjust = dirSize - containingDirSizeInBlocksOrig;
+ mBlocksUsedDelta += adjust;
+ mBlocksInDirectoriesDelta += adjust;
+ }
+
+
+ if (--mNewRefCounts[dir.GetObjectID()] == 0)
+ {
+ // Delete the directory itself
+ RaidFileWrite del(mStoreDiscSet, dirFilename,
+ mNewRefCounts[dir.GetObjectID()]);
+ del.Delete();
+ }
+
+ BOX_INFO("Housekeeping removed empty deleted dir " <<
+ BOX_FORMAT_OBJECTID(dirId));
+
+ // And adjust usage counts for the directory that's
+ // just been deleted
+ mBlocksUsedDelta -= dirSizeInBlocks;
+ mBlocksInDirectoriesDelta -= dirSizeInBlocks;
+
+ // Update count
+ ++mEmptyDirectoriesDeleted;
+ }
+}
+
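
One pattern worth noting from ScanDirectory() above: the new reference counts are accumulated in a plain vector that is grown on demand as object IDs are seen, and only compared against the on-disk database once the whole scan has completed. A minimal sketch of that accumulation step:

#include <cstdint>
#include <vector>

// Grow the vector so the object ID has a slot, then bump its count.
void CountReference(std::vector<uint32_t>& rRefCounts, int64_t objectID)
{
	if(rRefCounts.size() <= static_cast<size_t>(objectID))
	{
		rRefCounts.resize(objectID + 1, 0);
	}
	rRefCounts[objectID]++;
}
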
diff --git a/lib/backupstore/HousekeepStoreAccount.h b/lib/backupstore/HousekeepStoreAccount.h
new file mode 100644
index 00000000..cfa05a2e
--- /dev/null
+++ b/lib/backupstore/HousekeepStoreAccount.h
@@ -0,0 +1,113 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: HousekeepStoreAccount.h
+// Purpose: Action class to perform housekeeping on a store account
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+
+#ifndef HOUSEKEEPSTOREACCOUNT__H
+#define HOUSEKEEPSTOREACCOUNT__H
+
+#include <string>
+#include <set>
+#include <vector>
+
+class BackupStoreDirectory;
+
+class HousekeepingCallback
+{
+ public:
+ virtual ~HousekeepingCallback() {}
+ virtual bool CheckForInterProcessMsg(int AccountNum = 0, int MaximumWaitTime = 0) = 0;
+};
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: HousekeepStoreAccount
+// Purpose: Action class to perform housekeeping on a store account
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+class HousekeepStoreAccount
+{
+public:
+ HousekeepStoreAccount(int AccountID, const std::string &rStoreRoot,
+ int StoreDiscSet, HousekeepingCallback* pHousekeepingCallback);
+ ~HousekeepStoreAccount();
+
+ bool DoHousekeeping(bool KeepTryingForever = false);
+ int GetRefCountsAdjusted() { return mRefCountsAdjusted; }
+
+private:
+ // utility functions
+ void MakeObjectFilename(int64_t ObjectID, std::string &rFilenameOut);
+
+ bool ScanDirectory(int64_t ObjectID);
+ bool DeleteFiles();
+ bool DeleteEmptyDirectories();
+ void DeleteEmptyDirectory(int64_t dirId,
+ std::vector<int64_t>& rToExamine);
+ void DeleteFile(int64_t InDirectory, int64_t ObjectID, BackupStoreDirectory &rDirectory, const std::string &rDirectoryFilename, int64_t OriginalDirSizeInBlocks);
+
+private:
+ typedef struct
+ {
+ int64_t mObjectID;
+ int64_t mInDirectory;
+ int64_t mSizeInBlocks;
+ int32_t mMarkNumber;
+		int32_t mVersionAgeWithinMark;	// 0 == current, 1 == latest old version, etc.
+ bool mIsFlagDeleted; // false for files flagged "Old"
+ } DelEn;
+
+ struct DelEnCompare
+ {
+ bool operator()(const DelEn &x, const DelEn &y);
+ };
+
+ int mAccountID;
+ std::string mStoreRoot;
+ int mStoreDiscSet;
+ HousekeepingCallback* mpHousekeepingCallback;
+
+ int64_t mDeletionSizeTarget;
+
+ std::set<DelEn, DelEnCompare> mPotentialDeletions;
+ int64_t mPotentialDeletionsTotalSize;
+ int64_t mMaxSizeInPotentialDeletions;
+
+ // List of directories which are empty, and might be good for deleting
+ std::vector<int64_t> mEmptyDirectories;
+
+ // The re-calculated blocks used stats
+ int64_t mBlocksUsed;
+ int64_t mBlocksInOldFiles;
+ int64_t mBlocksInDeletedFiles;
+ int64_t mBlocksInDirectories;
+
+ // Deltas from deletion
+ int64_t mBlocksUsedDelta;
+ int64_t mBlocksInOldFilesDelta;
+ int64_t mBlocksInDeletedFilesDelta;
+ int64_t mBlocksInDirectoriesDelta;
+
+ // Deletion count
+ int64_t mFilesDeleted;
+ int64_t mEmptyDirectoriesDeleted;
+
+ // New reference count list
+ std::vector<uint32_t> mNewRefCounts;
+ bool mSuppressRefCountChangeWarnings;
+ int mRefCountsAdjusted;
+
+ // Poll frequency
+ int mCountUntilNextInterprocessMsgCheck;
+
+ Logging::Tagger mTagWithClientID;
+};
+
+#endif // HOUSEKEEPSTOREACCOUNT__H
+
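
A rough sketch of how a caller drives this class: supply a HousekeepingCallback (here a no-op that never requests an interruption) and call DoHousekeeping(). The account ID, store root and disc set below are placeholder values, not taken from any real configuration.

#include "HousekeepStoreAccount.h"

// Callback that never asks housekeeping to stop.
class NullHousekeepingCallback : public HousekeepingCallback
{
public:
	virtual bool CheckForInterProcessMsg(int AccountNum = 0, int MaximumWaitTime = 0)
	{
		return false;
	}
};

void RunHousekeepingOnce()
{
	NullHousekeepingCallback callback;
	// Placeholder account ID, store root and disc set.
	HousekeepStoreAccount housekeeping(0x1, "backup/01234567/", 0, &callback);
	housekeeping.DoHousekeeping(false /* don't wait forever for the lock */);
}
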
diff --git a/lib/backupstore/Makefile.extra b/lib/backupstore/Makefile.extra
new file mode 100644
index 00000000..c55fd549
--- /dev/null
+++ b/lib/backupstore/Makefile.extra
@@ -0,0 +1,15 @@
+MAKEPROTOCOL = ../../lib/server/makeprotocol.pl
+
+GEN_CMD = $(MAKEPROTOCOL) backupprotocol.txt
+
+# AUTOGEN SEEDING
+autogen_BackupProtocol.cpp autogen_BackupProtocol.h: $(MAKEPROTOCOL) backupprotocol.txt
+ $(_PERL) $(GEN_CMD)
+
+
+MAKEEXCEPTION = ../../lib/common/makeexception.pl
+
+# AUTOGEN SEEDING
+autogen_BackupStoreException.h autogen_BackupStoreException.cpp: $(MAKEEXCEPTION) BackupStoreException.txt
+ $(_PERL) $(MAKEEXCEPTION) BackupStoreException.txt
+
diff --git a/lib/backupstore/RunStatusProvider.h b/lib/backupstore/RunStatusProvider.h
new file mode 100644
index 00000000..89f361ca
--- /dev/null
+++ b/lib/backupstore/RunStatusProvider.h
@@ -0,0 +1,29 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: RunStatusProvider.h
+// Purpose: Declares the RunStatusProvider interface.
+// Created: 2008/08/14
+//
+// --------------------------------------------------------------------------
+
+#ifndef RUNSTATUSPROVIDER__H
+#define RUNSTATUSPROVIDER__H
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: RunStatusProvider
+// Purpose: Provides a StopRun() method which returns true if
+// the current backup should be halted.
+// Created: 2005/11/15
+//
+// --------------------------------------------------------------------------
+class RunStatusProvider
+{
+ public:
+ virtual ~RunStatusProvider() { }
+ virtual bool StopRun() = 0;
+};
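+
+// Illustrative only (the class name and flag are made up): a daemon could
+// expose its termination state through this interface like so
+//
+//	class StopOnTerminateFlag : public RunStatusProvider
+//	{
+//		public:
+//		StopOnTerminateFlag(const bool &rFlag) : mrFlag(rFlag) { }
+//		virtual bool StopRun() { return mrFlag; }
+//		private:
+//		const bool &mrFlag; // set to true elsewhere, e.g. by a signal handler
+//	};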
+
+#endif // RUNSTATUSPROVIDER__H
diff --git a/lib/backupstore/backupprotocol.txt b/lib/backupstore/backupprotocol.txt
new file mode 100644
index 00000000..aa987e70
--- /dev/null
+++ b/lib/backupstore/backupprotocol.txt
@@ -0,0 +1,235 @@
+#
+# backup protocol definition
+#
+
+Name Backup
+IdentString Box-Backup:v=C
+ServerContextClass BackupStoreContext BackupStoreContext.h
+
+AddType Filename BackupStoreFilenameClear BackupStoreFilenameClear.h
+
+ImplementLog Server syslog
+ImplementLog Client syslog
+ImplementLog Client file
+
+LogTypeToText Filename "%s" VAR.GetClearFilenameIfPossible("OPAQUE").c_str()
+
+BEGIN_OBJECTS
+
+# -------------------------------------------------------------------------------------
+# Session commands
+# -------------------------------------------------------------------------------------
+
+Error 0 IsError(Type,SubType) Reply
+ int32 Type
+ int32 SubType
+ CONSTANT ErrorType 1000
+ CONSTANT Err_WrongVersion 1
+ CONSTANT Err_NotInRightProtocolPhase 2
+ CONSTANT Err_BadLogin 3
+ CONSTANT Err_CannotLockStoreForWriting 4
+ CONSTANT Err_SessionReadOnly 5
+ CONSTANT Err_FileDoesNotVerify 6
+ CONSTANT Err_DoesNotExist 7
+ CONSTANT Err_DirectoryAlreadyExists 8
+ CONSTANT Err_CannotDeleteRoot 9
+ CONSTANT Err_TargetNameExists 10
+ CONSTANT Err_StorageLimitExceeded 11
+ CONSTANT Err_DiffFromFileDoesNotExist 12
+ CONSTANT Err_DoesNotExistInDirectory 13
+ CONSTANT Err_PatchConsistencyError 14
+ CONSTANT Err_MultiplyReferencedObject 15
+ CONSTANT Err_DisabledAccount 16
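+ # e.g. (illustrative) a Login for an account this server does not host
+ # is answered with an Error reply where Type = ErrorType (1000) and
+ # SubType = Err_BadLogin (3)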
+
+Version 1 Command(Version) Reply
+ int32 Version
+
+
+Login 2 Command(LoginConfirmed)
+ int32 ClientID
+ int32 Flags
+ CONSTANT Flags_ReadOnly 1
+
+
+LoginConfirmed 3 Reply
+ int64 ClientStoreMarker
+ int64 BlocksUsed
+ int64 BlocksSoftLimit
+ int64 BlocksHardLimit
+
+
+Finished 4 Command(Finished) Reply EndsConversation
+
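+
+# Typical conversation outline (illustrative note, not an additional protocol
+# rule): the client sends Version and the server replies with a Version
+# object; the client then sends Login and the server replies with
+# LoginConfirmed (or an Error); data commands follow; finally the client
+# sends Finished and the server replies with Finished, ending the
+# conversation.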
+
+# generic success object
+Success 5 Reply
+ int64 ObjectID
+
+
+SetClientStoreMarker 6 Command(Success)
+ int64 ClientStoreMarker
+
+
+# -------------------------------------------------------------------------------------
+# Generic object commands
+# -------------------------------------------------------------------------------------
+
+GetObject 10 Command(Success)
+ int64 ObjectID
+ CONSTANT NoObject 0
+ # reply has stream following, if ObjectID != NoObject
+
+
+MoveObject 11 Command(Success)
+ int64 ObjectID
+ int64 MoveFromDirectory
+ int64 MoveToDirectory
+ int32 Flags
+ Filename NewFilename
+
+ CONSTANT Flags_MoveAllWithSameName 1
+ CONSTANT Flags_AllowMoveOverDeletedObject 2
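+ # e.g. (illustrative) Flags = Flags_MoveAllWithSameName |
+ # Flags_AllowMoveOverDeletedObject (1 + 2 = 3) moves every version stored
+ # under the same name and allows the move to replace a deleted entry at
+ # the destination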
+
+# Consider this an object command: although it deals with directory entries,
+# it is not specific to either a file or a directory.
+
+
+GetObjectName 12 Command(ObjectName)
+ int64 ObjectID
+ int64 ContainingDirectoryID
+ CONSTANT ObjectID_DirectoryOnly 0
+
+ # set ObjectID to ObjectID_DirectoryOnly to only get info on the directory
+
+
+ObjectName 13 Reply
+ int32 NumNameElements
+ int64 ModificationTime
+ int64 AttributesHash
+ int16 Flags
+ # NumNameElements is zero if the object doesn't exist
+ CONSTANT NumNameElements_ObjectDoesntExist 0
+ # a stream of Filename objects follows, if and only if NumNameElements > 0
+
+
+# -------------------------------------------------------------------------------------
+# Directory commands
+# -------------------------------------------------------------------------------------
+
+CreateDirectory 20 Command(Success) StreamWithCommand
+ int64 ContainingDirectoryID
+ int64 AttributesModTime
+ Filename DirectoryName
+ # stream following containing attributes
+
+
+ListDirectory 21 Command(Success)
+ int64 ObjectID
+ int16 FlagsMustBeSet
+ int16 FlagsNotToBeSet
+ bool SendAttributes
+ # make sure these flags are synced with those in BackupStoreDirectory
+ CONSTANT Flags_INCLUDE_EVERYTHING -1
+ CONSTANT Flags_EXCLUDE_NOTHING 0
+ CONSTANT Flags_EXCLUDE_EVERYTHING 15
+ CONSTANT Flags_File 1
+ CONSTANT Flags_Dir 2
+ CONSTANT Flags_Deleted 4
+ CONSTANT Flags_OldVersion 8
+ # make sure this is the same as in BackupStoreConstants.h
+ CONSTANT RootDirectory 1
+
+ # reply has stream following Success object, containing a stored BackupStoreDirectory
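+
+ # Example (illustrative): to list only current files -- excluding
+ # directories, deleted entries and old versions -- send
+ # FlagsMustBeSet = Flags_File (1) and
+ # FlagsNotToBeSet = Flags_Deleted | Flags_OldVersion (4 + 8 = 12)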
+
+
+ChangeDirAttributes 22 Command(Success) StreamWithCommand
+ int64 ObjectID
+ int64 AttributesModTime
+ # stream following containing attributes
+
+
+DeleteDirectory 23 Command(Success)
+ int64 ObjectID
+
+UndeleteDirectory 24 Command(Success)
+ int64 ObjectID
+ # may not have exactly the desired effect if files within it have been
+ # deleted before the directory was deleted
+
+
+# -------------------------------------------------------------------------------------
+# File commands
+# -------------------------------------------------------------------------------------
+
+StoreFile 30 Command(Success) StreamWithCommand
+ int64 DirectoryObjectID
+ int64 ModificationTime
+ int64 AttributesHash
+ int64 DiffFromFileID # 0 if the file is not a diff
+ Filename Filename
+ # then send a stream containing the encoded file
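+ #
+ # Illustrative diff workflow (the protocol does not mandate this order):
+ # fetch the old version's block index with GetBlockIndexByID or
+ # GetBlockIndexByName, encode the new file as a patch against that index,
+ # then send StoreFile with DiffFromFileID set to the old version's object ID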
+
+
+GetFile 31 Command(Success)
+ int64 InDirectory
+ int64 ObjectID
+ # error returned if not a file, or does not exist
+ # reply has stream following, containing an encoded file IN STREAM ORDER
+ # (use GetObject to get it in file order)
+
+
+SetReplacementFileAttributes 32 Command(Success) StreamWithCommand
+ int64 InDirectory
+ int64 AttributesHash
+ Filename Filename
+ # stream follows containing attributes
+
+
+DeleteFile 33 Command(Success)
+ int64 InDirectory
+ Filename Filename
+ # will return 0 if the object couldn't be found in the specified directory
+
+
+GetBlockIndexByID 34 Command(Success)
+ int64 ObjectID
+
+ # stream of the block index follows the reply
+ # returns an error if the object didn't exist
+
+
+GetBlockIndexByName 35 Command(Success)
+ int64 InDirectory
+ Filename Filename
+
+ # Success object contains the found ID -- or 0 if the entry wasn't found in the directory
+ # stream of the block index follows the reply if found ID != 0
+
+
+UndeleteFile 36 Command(Success)
+ int64 InDirectory
+ int64 ObjectID
+ # will return 0 if the object couldn't be found in the specified directory
+
+
+# -------------------------------------------------------------------------------------
+# Information commands
+# -------------------------------------------------------------------------------------
+
+GetAccountUsage 40 Command(AccountUsage)
+ # no data members
+
+AccountUsage 41 Reply
+ int64 BlocksUsed
+ int64 BlocksInOldFiles
+ int64 BlocksInDeletedFiles
+ int64 BlocksInDirectories
+ int64 BlocksSoftLimit
+ int64 BlocksHardLimit
+ int32 BlockSize
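+ # e.g. (illustrative) a client can report approximate space used in bytes
+ # as BlocksUsed * BlockSize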
+
+GetIsAlive 42 Command(IsAlive)
+ # no data members
+
+IsAlive 43 Reply
+ # no data members
+