summaryrefslogtreecommitdiff
path: root/bin
diff options
context:
space:
mode:
authorBen Summers <ben@fluffy.co.uk>2005-10-14 08:50:54 +0000
committerBen Summers <ben@fluffy.co.uk>2005-10-14 08:50:54 +0000
commit99f8ce096bc5569adbfea1911dbcda24c28d8d8b (patch)
tree049c302161fea1f2f6223e1e8f3c40d9e8aadc8b /bin
Box Backup 0.09 with a few tweeks
Diffstat (limited to 'bin')
-rwxr-xr-xbin/bbackupctl/bbackupctl.cpp217
-rwxr-xr-xbin/bbackupd/BackupClientContext.cpp453
-rwxr-xr-xbin/bbackupd/BackupClientContext.h156
-rwxr-xr-xbin/bbackupd/BackupClientDeleteList.cpp195
-rwxr-xr-xbin/bbackupd/BackupClientDeleteList.h51
-rwxr-xr-xbin/bbackupd/BackupClientDirectoryRecord.cpp1203
-rwxr-xr-xbin/bbackupd/BackupClientDirectoryRecord.h115
-rwxr-xr-xbin/bbackupd/BackupClientInodeToIDMap.cpp279
-rwxr-xr-xbin/bbackupd/BackupClientInodeToIDMap.h67
-rwxr-xr-xbin/bbackupd/BackupDaemon.cpp1624
-rwxr-xr-xbin/bbackupd/BackupDaemon.h166
-rwxr-xr-xbin/bbackupd/bbackupd-config525
-rwxr-xr-xbin/bbackupd/bbackupd.cpp26
-rw-r--r--bin/bbackupobjdump/bbackupobjdump.cpp82
-rwxr-xr-xbin/bbackupquery/BackupQueries.cpp1700
-rwxr-xr-xbin/bbackupquery/BackupQueries.h101
-rwxr-xr-xbin/bbackupquery/Makefile.extra6
-rwxr-xr-xbin/bbackupquery/bbackupquery.cpp243
-rwxr-xr-xbin/bbackupquery/documentation.txt165
-rwxr-xr-xbin/bbackupquery/makedocumentation.pl75
-rwxr-xr-xbin/bbstoreaccounts/bbstoreaccounts.cpp548
-rwxr-xr-xbin/bbstored/BBStoreDHousekeeping.cpp175
-rwxr-xr-xbin/bbstored/BackupCommands.cpp861
-rwxr-xr-xbin/bbstored/BackupConstants.h23
-rwxr-xr-xbin/bbstored/BackupContext.cpp1650
-rwxr-xr-xbin/bbstored/BackupContext.h149
-rwxr-xr-xbin/bbstored/BackupStoreDaemon.cpp284
-rwxr-xr-xbin/bbstored/BackupStoreDaemon.h77
-rwxr-xr-xbin/bbstored/HousekeepStoreAccount.cpp844
-rwxr-xr-xbin/bbstored/HousekeepStoreAccount.h97
-rwxr-xr-xbin/bbstored/Makefile.extra9
-rwxr-xr-xbin/bbstored/backupprotocol.txt221
-rwxr-xr-xbin/bbstored/bbstored-certs319
-rwxr-xr-xbin/bbstored/bbstored-config242
-rwxr-xr-xbin/bbstored/bbstored.cpp25
35 files changed, 12973 insertions, 0 deletions
diff --git a/bin/bbackupctl/bbackupctl.cpp b/bin/bbackupctl/bbackupctl.cpp
new file mode 100755
index 00000000..0dc4f98d
--- /dev/null
+++ b/bin/bbackupctl/bbackupctl.cpp
@@ -0,0 +1,217 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: bbackupctl.cpp
+// Purpose: bbackupd daemon control program
+// Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include "MainHelper.h"
+#include "BoxPortsAndFiles.h"
+#include "BackupDaemonConfigVerify.h"
+#include "Socket.h"
+#include "SocketStream.h"
+#include "IOStreamGetLine.h"
+
+#include "MemLeakFindOn.h"
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    PrintUsageAndExit()
//		Purpose: Print command line usage for bbackupctl to stdout,
//				 then terminate the process with exit code 1.
//				 Never returns.
//		Created: 18/2/04
//
// --------------------------------------------------------------------------
void PrintUsageAndExit()
{
	// Fixed spelling: "syncronisation" -> "synchronisation"
	printf("Usage: bbackupctl [-q] [-c config_file] <command>\n"
	"Commands are:\n"
	"  sync -- start a synchronisation run now\n"
	"  force-sync -- force the start of a synchronisation run, even if SyncAllowScript says no\n"
	"  reload -- reload daemon configuration\n"
	"  terminate -- terminate daemon now\n"
	"  wait-for-sync -- wait until the next sync starts, then exit\n"
	);
	exit(1);
}
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    main(int, const char *[])
//		Purpose: Entry point for bbackupctl. Parses options, loads the
//				 bbackupd configuration to find the daemon's command
//				 socket, connects to it, then either sends the given
//				 command (plus "quit") or waits for the next sync to
//				 start. Returns 0 on success, 1 on any failure.
//		Created: 18/2/04
//
// --------------------------------------------------------------------------
int main(int argc, const char *argv[])
{
	// Process exit code; set to 1 if the daemon reports an error
	int returnCode = 0;

	MAINHELPER_SETUP_MEMORY_LEAK_EXIT_REPORT("bbackupctl.memleaks", "bbackupctl")

	MAINHELPER_START

	// Filename for configuration file?
	const char *configFilename = BOX_FILE_BBACKUPD_DEFAULT_CONFIG;

	// Quiet?
	bool quiet = false;

	// See if there's another entry on the command line
	// NOTE(review): "l:" is accepted by getopt here but has no case below,
	// so -l falls through to PrintUsageAndExit() -- confirm this is intended.
	int c;
	while((c = getopt(argc, (char * const *)argv, "qc:l:")) != -1)
	{
		switch(c)
		{
		case 'q':
			// Quiet mode
			quiet = true;
			break;

		case 'c':
			// store argument
			configFilename = optarg;
			break;

		case '?':
		default:
			PrintUsageAndExit();
		}
	}
	// Adjust arguments so argv[0] is the command word
	argc -= optind;
	argv += optind;

	// Check there's a command (exactly one)
	if(argc != 1)
	{
		PrintUsageAndExit();
	}

	// Read in the configuration file
	if(!quiet) printf("Using configuration file %s\n", configFilename);
	std::string errs;
	std::auto_ptr<Configuration> config(Configuration::LoadAndVerify(configFilename, &BackupDaemonConfigVerify, errs));
	if(config.get() == 0 || !errs.empty())
	{
		printf("Invalid configuration file:\n%s", errs.c_str());
		return 1;
	}
	// Easier coding
	const Configuration &conf(*config);

	// Check there's a socket defined in the config file
	if(!conf.KeyExists("CommandSocket"))
	{
		printf("Daemon isn't using a control socket, could not execute command.\nAdd a CommandSocket declaration to the bbackupd.conf file.\n");
		return 1;
	}

	// Connect to the daemon's UNIX domain command socket
	SocketStream connection;
	try
	{
		connection.Open(Socket::TypeUNIX, conf.GetKeyValue("CommandSocket").c_str());
	}
	catch(...)
	{
		printf("Failed to connect to daemon control socket.\n" \
			"Possible causes:\n" \
			"  * Daemon not running\n" \
			"  * Daemon busy syncing with store server\n" \
			"  * Another bbackupctl process is communicating with the daemon\n" \
			"  * Daemon is waiting to recover from an error\n"
		);
		return 1;
	}

	// For receiving line-oriented data from the daemon
	IOStreamGetLine getLine(connection);

	// Wait for the configuration summary (first line the daemon sends)
	std::string configSummary;
	if(!getLine.GetLine(configSummary))
	{
		printf("Failed to receive configuration summary from daemon\n");
		return 1;
	}

	// Was the connection rejected by the server?
	// (daemon closes the socket immediately for unauthorised clients)
	if(getLine.IsEOF())
	{
		printf("Server rejected the connection. Are you running bbackupctl as the same user as the daemon?\n");
		return 1;
	}

	// Decode the summary line: "bbackupd: <auto> <interval> <minage> <maxwait>"
	int autoBackup, updateStoreInterval, minimumFileAge, maxUploadWait;
	if(::sscanf(configSummary.c_str(), "bbackupd: %d %d %d %d", &autoBackup,
			&updateStoreInterval, &minimumFileAge, &maxUploadWait) != 4)
	{
		printf("Config summary didn't decode\n");
		return 1;
	}
	// Print summary?
	if(!quiet)
	{
		printf("Daemon configuration summary:\n" \
			"  AutomaticBackup = %s\n" \
			"  UpdateStoreInterval = %d seconds\n" \
			"  MinimumFileAge = %d seconds\n" \
			"  MaxUploadWait = %d seconds\n",
			autoBackup?"true":"false", updateStoreInterval, minimumFileAge, maxUploadWait);
	}

	// Is the command the "wait for sync to start" command?
	bool areWaitingForSync = false;
	if(::strcmp(argv[0], "wait-for-sync") == 0)
	{
		// Check that it's not in non-automatic mode, because then it'll never start
		if(!autoBackup)
		{
			printf("ERROR: Daemon is not in automatic mode -- sync will never start!\n");
			return 1;
		}

		// Yes... set the flag so we know what we're waiting for a sync to start
		areWaitingForSync = true;
	}
	else
	{
		// No? Just send the command given plus a quit command.
		std::string cmd(argv[0]);
		cmd += "\nquit\n";
		connection.Write(cmd.c_str(), cmd.size());
	}

	// Read the response lines until done or the daemon closes the socket
	std::string line;
	while(!getLine.IsEOF() && getLine.GetLine(line))
	{
		if(areWaitingForSync)
		{
			// Need to wait for the state change...
			if(line == "start-sync")
			{
				// Send a quit command to finish nicely
				connection.Write("quit\n", 5);

				// And we're done
				break;
			}
		}
		else
		{
			// Is this an OK or error line?
			if(line == "ok")
			{
				if(!quiet)
				{
					printf("Succeeded.\n");
				}
				break;
			}
			else if(line == "error")
			{
				printf("ERROR. (Check command spelling)\n");
				returnCode = 1;
				break;
			}
		}
	}

	MAINHELPER_END

	return returnCode;
}
diff --git a/bin/bbackupd/BackupClientContext.cpp b/bin/bbackupd/BackupClientContext.cpp
new file mode 100755
index 00000000..08a203c1
--- /dev/null
+++ b/bin/bbackupd/BackupClientContext.cpp
@@ -0,0 +1,453 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientContext.cpp
+// Purpose: Keep track of context
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <syslog.h>
+
+#include "BoxPortsAndFiles.h"
+#include "BoxTime.h"
+#include "BackupClientContext.h"
+#include "SocketStreamTLS.h"
+#include "Socket.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreException.h"
+#include "BackupDaemon.h"
+#include "autogen_BackupProtocolClient.h"
+
+#include "MemLeakFindOn.h"
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupClientContext::BackupClientContext(BackupDaemon &, TLSContext &, const std::string &, int32_t, bool)
//		Purpose: Constructor. Stores references to the owning daemon and
//				 TLS context plus connection parameters; all pointers
//				 start null and the connection is made lazily by
//				 GetConnection().
//		Created: 2003/10/08
//
// --------------------------------------------------------------------------
BackupClientContext::BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLSContext, const std::string &rHostname,
	int32_t AccountNumber, bool ExtendedLogging)
	: mrDaemon(rDaemon),
	  mrTLSContext(rTLSContext),
	  mHostname(rHostname),
	  mAccountNumber(AccountNumber),
	  mpSocket(0),				// created on first GetConnection()
	  mpConnection(0),			// ditto
	  mExtendedLogging(ExtendedLogging),
	  mClientStoreMarker(ClientStoreMarker_NotKnown),
	  mpDeleteList(0),			// created lazily by GetDeleteList()
	  mpCurrentIDMap(0),		// set later via SetIDMaps()
	  mpNewIDMap(0),			// ditto
	  mStorageLimitExceeded(false),
	  mpExcludeFiles(0),		// set later via SetExcludeLists(), may stay 0
	  mpExcludeDirs(0)
{
}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::~BackupClientContext()
+// Purpose: Destructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupClientContext::~BackupClientContext()
+{
+ CloseAnyOpenConnection();
+
+ // Delete delete list
+ if(mpDeleteList != 0)
+ {
+ delete mpDeleteList;
+ mpDeleteList = 0;
+ }
+}
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupClientContext::GetConnection()
//		Purpose: Returns the connection, making the connection and logging into
//				 the backup store if necessary. Also verifies the server
//				 version, checks the client store marker, and sets
//				 mStorageLimitExceeded from the account's block limits.
//				 Throws on any failure, cleaning up partial state first.
//		Created: 2003/10/08
//
// --------------------------------------------------------------------------
BackupProtocolClient &BackupClientContext::GetConnection()
{
	// Already got it? Just return it.
	if(mpConnection != 0)
	{
		return *mpConnection;
	}

	// Get a socket connection (reused across reconnects if already allocated)
	if(mpSocket == 0)
	{
		mpSocket = new SocketStreamTLS;
		ASSERT(mpSocket != 0);	// will have exceptioned if this was a problem
	}

	try
	{
		// Defensive.
		if(mpConnection != 0)
		{
			delete mpConnection;
			mpConnection = 0;
		}

		// Log intention
		::syslog(LOG_INFO, "Opening connection to server %s...", mHostname.c_str());

		// Connect!
		mpSocket->Open(mrTLSContext, Socket::TypeINET, mHostname.c_str(), BOX_PORT_BBSTORED);

		// And create a procotol object
		mpConnection = new BackupProtocolClient(*mpSocket);

		// Set logging option
		mpConnection->SetLogToSysLog(mExtendedLogging);

		// Handshake
		mpConnection->Handshake();

		// Check the version of the server -- must match exactly
		{
			std::auto_ptr<BackupProtocolClientVersion> serverVersion(mpConnection->QueryVersion(BACKUP_STORE_SERVER_VERSION));
			if(serverVersion->GetVersion() != BACKUP_STORE_SERVER_VERSION)
			{
				THROW_EXCEPTION(BackupStoreException, WrongServerVersion)
			}
		}

		// Login -- if this fails, the Protocol will exception
		std::auto_ptr<BackupProtocolClientLoginConfirmed> loginConf(mpConnection->QueryLogin(mAccountNumber, 0 /* read/write */));

		// Check that the client store marker is the one we expect
		// (a mismatch means another client changed the store behind our back)
		if(mClientStoreMarker != ClientStoreMarker_NotKnown)
		{
			if(loginConf->GetClientStoreMarker() != mClientStoreMarker)
			{
				// Not good... finish the connection, abort, etc, ignoring errors
				try
				{
					mpConnection->QueryFinished();
					mpSocket->Shutdown();
					mpSocket->Close();
				}
				catch(...)
				{
					// IGNORE
				}

				// Then throw an exception about this
				THROW_EXCEPTION(BackupStoreException, ClientMarkerNotAsExpected)
			}
		}

		// Log success
		::syslog(LOG_INFO, "Connection made, login successful");

		// Check to see if there is any space available on the server
		int64_t softLimit = loginConf->GetBlocksSoftLimit();
		int64_t hardLimit = loginConf->GetBlocksHardLimit();
		// Threshold for uploading new stuff: a third of the way from the
		// soft limit to the hard limit
		int64_t stopUploadThreshold = softLimit + ((hardLimit - softLimit) / 3);
		if(loginConf->GetBlocksUsed() > stopUploadThreshold)
		{
			// no -- flag so only things like deletions happen
			mStorageLimitExceeded = true;
			// Log
			::syslog(LOG_INFO, "Exceeded storage limits on server -- not uploading changes to files");
		}
	}
	catch(...)
	{
		// Clean up so the next call starts from scratch.
		delete mpConnection;
		mpConnection = 0;
		delete mpSocket;
		mpSocket = 0;
		throw;
	}

	return *mpConnection;
}
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupClientContext::CloseAnyOpenConnection()
//		Purpose: Closes a connection, if it's open. Also sets a new client
//				 store marker on the server if one isn't known yet, and
//				 discards any pending delete list. All errors during
//				 shutdown are deliberately ignored (best-effort cleanup).
//		Created: 2003/10/08
//
// --------------------------------------------------------------------------
void BackupClientContext::CloseAnyOpenConnection()
{
	if(mpConnection)
	{
		try
		{
			// Need to set a client store marker?
			if(mClientStoreMarker == ClientStoreMarker_NotKnown)
			{
				// Yes, choose one, the current time will do
				int64_t marker = GetCurrentBoxTime();

				// Set it on the store
				mpConnection->QuerySetClientStoreMarker(marker);

				// Record it so that it can be picked up later.
				mClientStoreMarker = marker;
			}

			// Quit nicely
			mpConnection->QueryFinished();
		}
		catch(...)
		{
			// Ignore errors here -- we're closing down anyway
		}

		// Delete it anyway.
		delete mpConnection;
		mpConnection = 0;
	}

	if(mpSocket)
	{
		try
		{
			// Be nice about closing the socket
			mpSocket->Shutdown();
			mpSocket->Close();
		}
		catch(...)
		{
			// Ignore errors
		}

		// Delete object
		delete mpSocket;
		mpSocket = 0;
	}

	// Delete any pending list -- deletions not performed are abandoned
	if(mpDeleteList != 0)
	{
		delete mpDeleteList;
		mpDeleteList = 0;
	}
}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetTimeout()
+// Purpose: Gets the current timeout time.
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+int BackupClientContext::GetTimeout() const
+{
+ if(mpConnection)
+ {
+ return mpConnection->GetTimeout();
+ }
+
+ return (15*60*1000);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetDeleteList()
+// Purpose: Returns the delete list, creating one if necessary
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientDeleteList &BackupClientContext::GetDeleteList()
+{
+ // Already created?
+ if(mpDeleteList == 0)
+ {
+ mpDeleteList = new BackupClientDeleteList;
+ }
+
+ // Return reference to object
+ return *mpDeleteList;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name:
+// Purpose:
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientContext::PerformDeletions()
+{
+ // Got a list?
+ if(mpDeleteList == 0)
+ {
+ // Nothing to do
+ return;
+ }
+
+ // Delegate to the delete list object
+ mpDeleteList->PerformDeletions(*this);
+
+ // Delete the object
+ delete mpDeleteList;
+ mpDeleteList = 0;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetCurrentIDMap() const
+// Purpose: Return a (const) reference to the current ID map
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+const BackupClientInodeToIDMap &BackupClientContext::GetCurrentIDMap() const
+{
+ ASSERT(mpCurrentIDMap != 0);
+ if(mpCurrentIDMap == 0)
+ {
+ THROW_EXCEPTION(CommonException, Internal)
+ }
+ return *mpCurrentIDMap;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetNewIDMap() const
+// Purpose: Return a reference to the new ID map
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientInodeToIDMap &BackupClientContext::GetNewIDMap() const
+{
+ ASSERT(mpNewIDMap != 0);
+ if(mpNewIDMap == 0)
+ {
+ THROW_EXCEPTION(CommonException, Internal)
+ }
+ return *mpNewIDMap;
+}
+
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupClientContext::FindFilename(int64_t, int64_t, std::string &, bool &) const
//		Purpose: Attempts to find the pathname of an object with a given ID on the server.
//				 Returns true if it can be found, in which case rPathOut is the local filename,
//				 and rIsDirectoryOut == true if the local object is a directory.
//				 May open a server connection as a side effect. Optional
//				 out-params report mod time, attributes hash and leafname.
//		Created: 12/11/03
//
// --------------------------------------------------------------------------
bool BackupClientContext::FindFilename(int64_t ObjectID, int64_t ContainingDirectory, std::string &rPathOut, bool &rIsDirectoryOut,
	bool &rIsCurrentVersionOut, box_time_t *pModTimeOnServer, box_time_t *pAttributesHashOnServer, BackupStoreFilenameClear *pLeafname)
{
	// Make a connection to the server (reuses an existing one if open)
	BackupProtocolClient &connection(GetConnection());

	// Request filenames from the server, in a "safe" manner to ignore errors properly
	// (Send + Receive directly, rather than the Query wrapper which would throw)
	{
		BackupProtocolClientGetObjectName send(ObjectID, ContainingDirectory);
		connection.Send(send);
	}
	std::auto_ptr<BackupProtocolObjectCl> preply(connection.Receive());

	// Is it of the right type?
	if(preply->GetType() != BackupProtocolClientObjectName::TypeID)
	{
		// Was an error or something
		return false;
	}

	// Cast to expected type.
	BackupProtocolClientObjectName *names = (BackupProtocolClientObjectName *)(preply.get());

	// Anything found?
	int32_t numElements = names->GetNumNameElements();
	if(numElements <= 0)
	{
		// No.
		return false;
	}

	// Get the stream containing all the names
	std::auto_ptr<IOStream> nameStream(connection.ReceiveStream());

	// Path, assembled leaf-first below
	std::string path;

	// Remember this is in reverse order! Element 0 is the leafname,
	// the last element is the location name.
	for(int l = 0; l < numElements; ++l)
	{
		BackupStoreFilenameClear elementName;
		elementName.ReadFromStream(*nameStream, GetTimeout());

		// Store leafname for caller?
		if(l == 0 && pLeafname)
		{
			*pLeafname = elementName;
		}

		// Is it part of the filename in the location?
		if(l < (numElements - 1))
		{
			// Part of filename within -- prepend to the path built so far
			path = (path.empty())?(elementName.GetClearFilename()):(elementName.GetClearFilename() + DIRECTORY_SEPARATOR_ASCHAR + path);
		}
		else
		{
			// Location name -- look up in daemon's records to map it to
			// the local filesystem path
			std::string locPath;
			if(!mrDaemon.FindLocationPathName(elementName.GetClearFilename(), locPath))
			{
				// Didn't find the location... so can't give the local filename
				return false;
			}

			// Add in location path
			path = (path.empty())?(locPath):(locPath + DIRECTORY_SEPARATOR_ASCHAR + path);
		}
	}

	// Is it a directory?
	rIsDirectoryOut = ((names->GetFlags() & BackupProtocolClientListDirectory::Flags_Dir) == BackupProtocolClientListDirectory::Flags_Dir);

	// Is it the current version? (neither old nor deleted)
	rIsCurrentVersionOut = ((names->GetFlags() & (BackupProtocolClientListDirectory::Flags_OldVersion | BackupProtocolClientListDirectory::Flags_Deleted)) == 0);

	// And other information which may be required
	if(pModTimeOnServer) *pModTimeOnServer = names->GetModificationTime();
	if(pAttributesHashOnServer) *pAttributesHashOnServer = names->GetAttributesHash();

	// Tell caller about the pathname
	rPathOut = path;

	// Found
	return true;
}
+
+
diff --git a/bin/bbackupd/BackupClientContext.h b/bin/bbackupd/BackupClientContext.h
new file mode 100755
index 00000000..3933dbed
--- /dev/null
+++ b/bin/bbackupd/BackupClientContext.h
@@ -0,0 +1,156 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientContext.h
+// Purpose: Keep track of context
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTCONTEXT__H
+#define BACKUPCLIENTCONTEXT__H
+
+#include "BoxTime.h"
+#include "BackupClientDeleteList.h"
+#include "ExcludeList.h"
+
+class TLSContext;
+class BackupProtocolClient;
+class SocketStreamTLS;
+class BackupClientInodeToIDMap;
+class BackupDaemon;
+class BackupStoreFilenameClear;
+
+#include <string>
+
// --------------------------------------------------------------------------
//
// Class
//		Name:    BackupClientContext
//		Purpose: Keeps track of state shared across one backup run:
//				 the server connection (opened lazily), client store
//				 marker, pending delete list, inode-to-ID maps and
//				 exclude lists.
//		Created: 2003/10/08
//
// --------------------------------------------------------------------------
class BackupClientContext
{
public:
	BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLSContext, const std::string &rHostname,
		int32_t AccountNumber, bool ExtendedLogging);
	~BackupClientContext();
private:
	// No copying -- owns raw pointers to connection state
	BackupClientContext(const BackupClientContext &);
public:

	// Returns the server connection, opening and logging in if necessary
	BackupProtocolClient &GetConnection();

	// Closes any open connection, setting a client store marker if needed
	void CloseAnyOpenConnection();

	// Timeout in milliseconds for network operations
	int GetTimeout() const;

	// Pending deletions, created lazily
	BackupClientDeleteList &GetDeleteList();
	void PerformDeletions();

	enum
	{
		// Sentinel: no client store marker has been seen/chosen yet
		ClientStoreMarker_NotKnown = 0
	};

	void SetClientStoreMarker(int64_t ClientStoreMarker) {mClientStoreMarker = ClientStoreMarker;}
	int64_t GetClientStoreMarker() const {return mClientStoreMarker;}

	// True if the account is over the upload threshold (set during login)
	bool StorageLimitExceeded() {return mStorageLimitExceeded;}

	// --------------------------------------------------------------------------
	//
	// Function
	//		Name:    BackupClientContext::SetIDMaps(const BackupClientInodeToIDMap *, BackupClientInodeToIDMap *)
	//		Purpose: Store pointers to the Current and New ID maps.
	//				 Pointers are borrowed, not owned -- caller keeps them valid.
	//		Created: 11/11/03
	//
	// --------------------------------------------------------------------------
	void SetIDMaps(const BackupClientInodeToIDMap *pCurrent, BackupClientInodeToIDMap *pNew)
	{
		ASSERT(pCurrent != 0);
		ASSERT(pNew != 0);
		mpCurrentIDMap = pCurrent;
		mpNewIDMap = pNew;
	}
	const BackupClientInodeToIDMap &GetCurrentIDMap() const;
	BackupClientInodeToIDMap &GetNewIDMap() const;


	// --------------------------------------------------------------------------
	//
	// Function
	//		Name:    BackupClientContext::SetExcludeLists(ExcludeList *, ExcludeList *)
	//		Purpose: Sets the exclude lists for the operation. Can be 0.
	//				 Pointers are borrowed, not owned.
	//		Created: 28/1/04
	//
	// --------------------------------------------------------------------------
	void SetExcludeLists(ExcludeList *pExcludeFiles, ExcludeList *pExcludeDirs)
	{
		mpExcludeFiles = pExcludeFiles;
		mpExcludeDirs = pExcludeDirs;
	}

	// --------------------------------------------------------------------------
	//
	// Function
	//		Name:    BackupClientContext::ExcludeFile(const std::string &)
	//		Purpose: Returns true if this file should be excluded from the backup
	//		Created: 28/1/04
	//
	// --------------------------------------------------------------------------
	inline bool ExcludeFile(const std::string &rFullFilename)
	{
		if(mpExcludeFiles != 0)
		{
			return mpExcludeFiles->IsExcluded(rFullFilename);
		}
		// If no list, don't exclude anything
		return false;
	}

	// --------------------------------------------------------------------------
	//
	// Function
	//		Name:    BackupClientContext::ExcludeDir(const std::string &)
	//		Purpose: Returns true if this directory should be excluded from the backup
	//		Created: 28/1/04
	//
	// --------------------------------------------------------------------------
	inline bool ExcludeDir(const std::string &rFullDirName)
	{
		if(mpExcludeDirs != 0)
		{
			return mpExcludeDirs->IsExcluded(rFullDirName);
		}
		// If no list, don't exclude anything
		return false;
	}

	// Utility functions -- may do a lot of work
	bool FindFilename(int64_t ObjectID, int64_t ContainingDirectory, std::string &rPathOut, bool &rIsDirectoryOut,
		bool &rIsCurrentVersionOut, box_time_t *pModTimeOnServer = 0, box_time_t *pAttributesHashOnServer = 0,
		BackupStoreFilenameClear *pLeafname = 0); // not const as may connect to server

private:
	BackupDaemon &mrDaemon;					// owning daemon (for location lookups)
	TLSContext &mrTLSContext;				// TLS context for connections
	std::string mHostname;					// store server hostname
	int32_t mAccountNumber;					// account to log in as
	SocketStreamTLS *mpSocket;				// owned; 0 when not connected
	BackupProtocolClient *mpConnection;		// owned; 0 when not connected
	bool mExtendedLogging;					// log protocol to syslog?
	int64_t mClientStoreMarker;				// ClientStoreMarker_NotKnown until set
	BackupClientDeleteList *mpDeleteList;	// owned; created lazily
	const BackupClientInodeToIDMap *mpCurrentIDMap;	// borrowed, see SetIDMaps()
	BackupClientInodeToIDMap *mpNewIDMap;			// borrowed, see SetIDMaps()
	bool mStorageLimitExceeded;				// set during login if over threshold
	ExcludeList *mpExcludeFiles;			// borrowed, may be 0
	ExcludeList *mpExcludeDirs;				// borrowed, may be 0
};
+
+
+#endif // BACKUPCLIENTCONTEXT__H
+
diff --git a/bin/bbackupd/BackupClientDeleteList.cpp b/bin/bbackupd/BackupClientDeleteList.cpp
new file mode 100755
index 00000000..f6d8e0dc
--- /dev/null
+++ b/bin/bbackupd/BackupClientDeleteList.cpp
@@ -0,0 +1,195 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientDeleteList.cpp
+// Purpose: List of pending deletes for backup
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <algorithm>
+
+#include "BackupClientDeleteList.h"
+#include "BackupClientContext.h"
+#include "autogen_BackupProtocolClient.h"
+
+#include "MemLeakFindOn.h"
+
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupClientDeleteList::BackupClientDeleteList()
//		Purpose: Constructor. The member containers default-construct
//				 themselves empty; nothing else to do.
//		Created: 10/11/03
//
// --------------------------------------------------------------------------
BackupClientDeleteList::BackupClientDeleteList()
{
}
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupClientDeleteList::~BackupClientDeleteList()
//		Purpose: Destructor. No owned resources beyond the containers,
//				 which clean up themselves.
//		Created: 10/11/03
//
// --------------------------------------------------------------------------
BackupClientDeleteList::~BackupClientDeleteList()
{
}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::AddDirectoryDelete(int64_t)
+// Purpose: Add a directory to the list of directories to be deleted.
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID)
+{
+ // Only add the delete to the list if it's not in the "no delete" set
+ if(mDirectoryNoDeleteList.find(ObjectID) == mDirectoryNoDeleteList.end())
+ {
+ // Not in the list, so should delete it
+ mDirectoryList.push_back(ObjectID);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::AddFileDelete(int64_t, BackupStoreFilenameClear &)
+// Purpose:
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+{
+ // Try to find it in the no delete list
+ std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator delEntry(mFileNoDeleteList.begin());
+ while(delEntry != mFileNoDeleteList.end())
+ {
+ if((delEntry)->first == DirectoryID && (delEntry)->second == rFilename)
+ {
+ // Found!
+ break;
+ }
+ ++delEntry;
+ }
+
+ // Only add it to the delete list if it wasn't in the no delete list
+ if(delEntry == mFileNoDeleteList.end())
+ {
+ mFileList.push_back(std::pair<int64_t, BackupStoreFilename>(DirectoryID, rFilename));
+ }
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
+// Purpose: Perform all the pending deletes
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
+{
+ // Anything to do?
+ if(mDirectoryList.empty() && mFileList.empty())
+ {
+ // Nothing!
+ return;
+ }
+
+ // Get a connection
+ BackupProtocolClient &connection(rContext.GetConnection());
+
+ // Do the deletes
+ for(std::vector<int64_t>::iterator i(mDirectoryList.begin()); i != mDirectoryList.end(); ++i)
+ {
+ connection.QueryDeleteDirectory(*i);
+ }
+
+ // Clear the directory list
+ mDirectoryList.clear();
+
+ // Delete the files
+ for(std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator i(mFileList.begin()); i != mFileList.end(); ++i)
+ {
+ connection.QueryDeleteFile(i->first, i->second);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::StopDirectoryDeletion(int64_t)
+// Purpose: Stop a directory being deleted
+// Created: 19/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
+{
+ // First of all, is it in the delete vector?
+ std::vector<int64_t>::iterator delEntry(std::find(mDirectoryList.begin(), mDirectoryList.end(), ObjectID));
+ if(delEntry != mDirectoryList.end())
+ {
+ // erase this entry
+ mDirectoryList.erase(delEntry);
+ }
+ else
+ {
+ // Haven't been asked to delete it yet, put it in the no delete list
+ mDirectoryNoDeleteList.insert(ObjectID);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::StopFileDeletion(int64_t, const BackupStoreFilename &)
+// Purpose: Stop a file from being deleted
+// Created: 19/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+{
+ // Find this in the delete list
+ std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator delEntry(mFileList.begin());
+ while(delEntry != mFileList.end())
+ {
+ if((delEntry)->first == DirectoryID && (delEntry)->second == rFilename)
+ {
+ // Found!
+ break;
+ }
+ ++delEntry;
+ }
+
+ if(delEntry != mFileList.end())
+ {
+ // erase this entry
+ mFileList.erase(delEntry);
+ }
+ else
+ {
+ // Haven't been asked to delete it yet, put it in the no delete list
+ mFileNoDeleteList.push_back(std::pair<int64_t, BackupStoreFilename>(DirectoryID, rFilename));
+ }
+
+}
+
+
+
+
+
diff --git a/bin/bbackupd/BackupClientDeleteList.h b/bin/bbackupd/BackupClientDeleteList.h
new file mode 100755
index 00000000..5940cf50
--- /dev/null
+++ b/bin/bbackupd/BackupClientDeleteList.h
@@ -0,0 +1,51 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientDeleteList.h
+// Purpose: List of pending deletes for backup
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTDELETELIST__H
+#define BACKUPCLIENTDELETELIST__H
+
+#include "BackupStoreFilename.h"
+
+class BackupClientContext;
+
+#include <vector>
+#include <utility>
+#include <set>
+
// --------------------------------------------------------------------------
//
// Class
//		Name:    BackupClientDeleteList
//		Purpose: List of pending deletes for backup. Deletions are queued
//				 during a scan and performed in one batch; Stop*Deletion()
//				 can cancel a queued delete or veto a future one.
//		Created: 10/11/03
//
// --------------------------------------------------------------------------
class BackupClientDeleteList
{
public:
	BackupClientDeleteList();
	~BackupClientDeleteList();

	// Queue deletions (no-ops if vetoed by a Stop*Deletion() call)
	void AddDirectoryDelete(int64_t ObjectID);
	void AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename);

	// Cancel a queued deletion, or veto one not yet queued
	void StopDirectoryDeletion(int64_t ObjectID);
	void StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename);

	// Issue all queued deletions to the server
	void PerformDeletions(BackupClientContext &rContext);

private:
	std::vector<int64_t> mDirectoryList;
	std::set<int64_t> mDirectoryNoDeleteList;	// note: things only get in this list if they're not present in mDirectoryList when they are 'added'
	std::vector<std::pair<int64_t, BackupStoreFilename> > mFileList;
	std::vector<std::pair<int64_t, BackupStoreFilename> > mFileNoDeleteList;	// same "only if not already queued" rule as directories
};
+
+#endif // BACKUPCLIENTDELETELIST__H
+
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.cpp b/bin/bbackupd/BackupClientDirectoryRecord.cpp
new file mode 100755
index 00000000..eb4a8343
--- /dev/null
+++ b/bin/bbackupd/BackupClientDirectoryRecord.cpp
@@ -0,0 +1,1203 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientDirectoryRecord.cpp
+// Purpose: Implementation of record about directory for backup client
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <string.h>
+
+#include "BackupClientDirectoryRecord.h"
+#include "autogen_BackupProtocolClient.h"
+#include "BackupClientContext.h"
+#include "IOStream.h"
+#include "MemBlockStream.h"
+#include "CommonException.h"
+#include "CollectInBufferStream.h"
+#include "BackupStoreFile.h"
+#include "BackupClientInodeToIDMap.h"
+#include "FileModificationTime.h"
+#include "BackupDaemon.h"
+#include "BackupStoreException.h"
+
+#ifdef PLATFORM_LINUX
+ #include "LinuxWorkaround.h"
+#endif
+
+#include "MemLeakFindOn.h"
+
+typedef std::map<std::string, BackupStoreDirectory::Entry *> DecryptedEntriesMap_t;
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::BackupClientDirectoryRecord()
+// Purpose: Constructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::BackupClientDirectoryRecord(int64_t ObjectID, const std::string &rSubDirName)
+ : mObjectID(ObjectID),
+ mSubDirName(rSubDirName),
+ mInitialSyncDone(false),
+ mSyncDone(false),
+ mpPendingEntries(0)
+{
+ ::memset(mStateChecksum, 0, sizeof(mStateChecksum));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::~BackupClientDirectoryRecord()
+// Purpose: Destructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::~BackupClientDirectoryRecord()
+{
+ // Make deletion recursive
+ DeleteSubDirectories();
+
+ // Delete maps
+ if(mpPendingEntries != 0)
+ {
+ delete mpPendingEntries;
+ mpPendingEntries = 0;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::DeleteSubDirectories();
+// Purpose: Delete all sub directory entries
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::DeleteSubDirectories()
+{
+ // Delete all pointers
+ for(std::map<std::string, BackupClientDirectoryRecord *>::iterator i = mSubDirectories.begin();
+ i != mSubDirectories.end(); ++i)
+ {
+ delete i->second;
+ }
+
+ // Empty list
+ mSubDirectories.clear();
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &, int64_t, const std::string &, bool)
+// Purpose: Syncronise, recusively, a local directory with the server.
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &rParams, int64_t ContainingDirectoryID,
+ const std::string &rLocalPath, bool ThisDirHasJustBeenCreated)
+{
+ // Signal received by daemon?
+ if(rParams.mrDaemon.StopRun())
+ {
+ // Yes. Stop now.
+ THROW_EXCEPTION(BackupStoreException, SignalReceived)
+ }
+
+ // Start by making some flag changes, marking this sync as not done,
+ // and on the immediate sub directories.
+ mSyncDone = false;
+ for(std::map<std::string, BackupClientDirectoryRecord *>::iterator i = mSubDirectories.begin();
+ i != mSubDirectories.end(); ++i)
+ {
+ i->second->mSyncDone = false;
+ }
+
+ // Work out the time in the future after which the file should be uploaded regardless.
+ // This is a simple way to avoid having too many problems with file servers when they have
+ // clients with badly out of sync clocks.
+ rParams.mUploadAfterThisTimeInTheFuture = GetCurrentBoxTime() + rParams.mMaxFileTimeInFuture;
+
+ // Build the current state checksum to compare against while getting info from dirs
+ // Note checksum is used locally only, so byte order isn't considered.
+ MD5Digest currentStateChecksum;
+
+ // Stat the directory, to get attribute info
+ {
+ struct stat st;
+ if(::stat(rLocalPath.c_str(), &st) != 0)
+ {
+ // The directory has probably been deleted, so just ignore this error.
+ // In a future scan, this deletion will be noticed, deleted from server, and this object deleted.
+ TRACE1("Stat failed for '%s' (directory)\n", rLocalPath.c_str());
+ return;
+ }
+ // Store inode number in map so directories are tracked in case they're renamed
+ {
+ BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
+ idMap.AddToMap(st.st_ino, mObjectID, ContainingDirectoryID);
+ }
+ // Add attributes to checksum
+ currentStateChecksum.Add(&st.st_mode, sizeof(st.st_mode));
+ currentStateChecksum.Add(&st.st_uid, sizeof(st.st_uid));
+ currentStateChecksum.Add(&st.st_gid, sizeof(st.st_gid));
+ // Inode to be paranoid about things moving around
+ currentStateChecksum.Add(&st.st_ino, sizeof(st.st_ino));
+#ifndef PLATFORM_stat_NO_st_flags
+ currentStateChecksum.Add(&st.st_flags, sizeof(st.st_flags));
+#endif // n PLATFORM_stat_NO_st_flags
+ }
+
+ // Read directory entries, building arrays of names
+ // First, need to read the contents of the directory.
+ std::vector<std::string> dirs;
+ std::vector<std::string> files;
+ bool downloadDirectoryRecordBecauseOfFutureFiles = false;
+ // BLOCK
+ {
+ // read the contents...
+ DIR *dirHandle = 0;
+ try
+ {
+ dirHandle = ::opendir(rLocalPath.c_str());
+ if(dirHandle == 0)
+ {
+ // Report the error (logs and eventual email to administrator)
+ SetErrorWhenReadingFilesystemObject(rParams, rLocalPath.c_str());
+ // Ignore this directory for now.
+ return;
+ }
+
+ // Basic structure for checksum info
+ struct {
+ box_time_t mModificationTime;
+ box_time_t mAttributeModificationTime;
+ int64_t mSize;
+ // And then the name follows
+ } checksum_info;
+ // Be paranoid about structure packing
+ ::memset(&checksum_info, 0, sizeof(checksum_info));
+
+ struct dirent *en = 0;
+ struct stat st;
+ std::string filename;
+ while((en = ::readdir(dirHandle)) != 0)
+ {
+ // Don't need to use LinuxWorkaround_FinishDirentStruct(en, rLocalPath.c_str());
+ // on Linux, as a stat is performed to get all this info
+
+ if(en->d_name[0] == '.' &&
+ (en->d_name[1] == '\0' || (en->d_name[1] == '.' && en->d_name[2] == '\0')))
+ {
+ // ignore, it's . or ..
+ continue;
+ }
+
+ // Stat file to get info
+ filename = rLocalPath + DIRECTORY_SEPARATOR + en->d_name;
+ if(::lstat(filename.c_str(), &st) != 0)
+ {
+ TRACE1("Stat failed for '%s' (contents)\n", filename.c_str());
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+
+ int type = st.st_mode & S_IFMT;
+ if(type == S_IFREG || type == S_IFLNK)
+ {
+ // File or symbolic link
+
+ // Exclude it?
+ if(rParams.mrContext.ExcludeFile(filename))
+ {
+ // Next item!
+ continue;
+ }
+
+ // Store on list
+ files.push_back(std::string(en->d_name));
+ }
+ else if(type == S_IFDIR)
+ {
+ // Directory
+
+ // Exclude it?
+ if(rParams.mrContext.ExcludeDir(filename))
+ {
+ // Next item!
+ continue;
+ }
+
+ // Store on list
+ dirs.push_back(std::string(en->d_name));
+ }
+ else
+ {
+ continue;
+ }
+
+ // Here if the object is something to back up (file, symlink or dir, not excluded)
+ // So make the information for adding to the checksum
+ checksum_info.mModificationTime = FileModificationTime(st);
+ checksum_info.mAttributeModificationTime = FileAttrModificationTime(st);
+ checksum_info.mSize = st.st_size;
+ currentStateChecksum.Add(&checksum_info, sizeof(checksum_info));
+ currentStateChecksum.Add(en->d_name, strlen(en->d_name));
+
+ // If the file has been modified madly into the future, download the
+ // directory record anyway to ensure that it doesn't get uploaded
+ // every single time the disc is scanned.
+ if(checksum_info.mModificationTime > rParams.mUploadAfterThisTimeInTheFuture)
+ {
+ downloadDirectoryRecordBecauseOfFutureFiles = true;
+ // Log that this has happened
+ if(!rParams.mHaveLoggedWarningAboutFutureFileTimes)
+ {
+ ::syslog(LOG_ERR, "Some files have modification times excessively in the future. Check clock syncronisation.\n");
+ ::syslog(LOG_ERR, "Example file (only one shown) : %s\n", filename.c_str());
+ rParams.mHaveLoggedWarningAboutFutureFileTimes = true;
+ }
+ }
+ }
+
+ if(::closedir(dirHandle) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+ dirHandle = 0;
+ }
+ catch(...)
+ {
+ if(dirHandle != 0)
+ {
+ ::closedir(dirHandle);
+ }
+ throw;
+ }
+ }
+
+ // Finish off the checksum, and compare with the one currently stored
+ bool checksumDifferent = true;
+ currentStateChecksum.Finish();
+ if(mInitialSyncDone && currentStateChecksum.DigestMatches(mStateChecksum))
+ {
+ // The checksum is the same, and there was one to compare with
+ checksumDifferent = false;
+ }
+
+ // Pointer to potentially downloaded store directory info
+ BackupStoreDirectory *pdirOnStore = 0;
+
+ try
+ {
+ // Want to get the directory listing?
+ if(ThisDirHasJustBeenCreated)
+ {
+ // Avoid sending another command to the server when we know it's empty
+ pdirOnStore = new BackupStoreDirectory(mObjectID, ContainingDirectoryID);
+ }
+ else
+ {
+ // Consider asking the store for it
+ if(!mInitialSyncDone || checksumDifferent || downloadDirectoryRecordBecauseOfFutureFiles)
+ {
+ pdirOnStore = FetchDirectoryListing(rParams);
+ }
+ }
+
+ // Make sure the attributes are up to date -- if there's space on the server
+	// and this directory has not just been created (because its attributes will be correct in this case)
+ // and the checksum is different, implying they *MIGHT* be different.
+ if((!ThisDirHasJustBeenCreated) && checksumDifferent && (!rParams.mrContext.StorageLimitExceeded()))
+ {
+ UpdateAttributes(rParams, pdirOnStore, rLocalPath);
+ }
+
+ // Create the list of pointers to directory entries
+ std::vector<BackupStoreDirectory::Entry *> entriesLeftOver;
+ if(pdirOnStore)
+ {
+ entriesLeftOver.resize(pdirOnStore->GetNumberOfEntries(), 0);
+ BackupStoreDirectory::Iterator i(*pdirOnStore);
+ // Copy in pointers to all the entries
+ for(unsigned int l = 0; l < pdirOnStore->GetNumberOfEntries(); ++l)
+ {
+ entriesLeftOver[l] = i.Next();
+ }
+ }
+
+ // Do the directory reading
+ bool updateCompleteSuccess = UpdateItems(rParams, rLocalPath, pdirOnStore, entriesLeftOver, files, dirs);
+
+ // LAST THING! (think exception safety)
+ // Store the new checksum -- don't fetch things unnecessarily in the future
+ // But... only if 1) the storage limit isn't exceeded -- make sure things are done again if
+ // the directory is modified later
+ // and 2) All the objects within the directory were stored successfully.
+ if(!rParams.mrContext.StorageLimitExceeded() && updateCompleteSuccess)
+ {
+ currentStateChecksum.CopyDigestTo(mStateChecksum);
+ }
+ }
+ catch(...)
+ {
+ // Bad things have happened -- clean up
+ if(pdirOnStore != 0)
+ {
+ delete pdirOnStore;
+ pdirOnStore = 0;
+ }
+
+ // Set things so that we get a full go at stuff later
+ ::memset(mStateChecksum, 0, sizeof(mStateChecksum));
+
+ throw;
+ }
+
+ // Clean up directory on store
+ if(pdirOnStore != 0)
+ {
+ delete pdirOnStore;
+ pdirOnStore = 0;
+ }
+
+ // Flag things as having happened.
+ mInitialSyncDone = true;
+ mSyncDone = true;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::FetchDirectoryListing(BackupClientDirectoryRecord::SyncParams &)
+// Purpose: Fetch the directory listing of this directory from the store.
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory *BackupClientDirectoryRecord::FetchDirectoryListing(BackupClientDirectoryRecord::SyncParams &rParams)
+{
+ BackupStoreDirectory *pdir = 0;
+
+ try
+ {
+ // Get connection to store
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Query the directory
+ std::auto_ptr<BackupProtocolClientSuccess> dirreply(connection.QueryListDirectory(
+ mObjectID,
+ BackupProtocolClientListDirectory::Flags_INCLUDE_EVERYTHING, // both files and directories
+ BackupProtocolClientListDirectory::Flags_Deleted | BackupProtocolClientListDirectory::Flags_OldVersion, // exclude old/deleted stuff
+ true /* want attributes */));
+
+ // Retrieve the directory from the stream following
+ pdir = new BackupStoreDirectory;
+ ASSERT(pdir != 0);
+ std::auto_ptr<IOStream> dirstream(connection.ReceiveStream());
+ pdir->ReadFromStream(*dirstream, connection.GetTimeout());
+ }
+ catch(...)
+ {
+ delete pdir;
+ pdir = 0;
+ throw;
+ }
+
+ return pdir;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::SyncParams &, const std::string &)
+// Purpose: Sets the attributes of the directory on the store, if necessary
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::SyncParams &rParams, BackupStoreDirectory *pDirOnStore, const std::string &rLocalPath)
+{
+ // Get attributes for the directory
+ BackupClientFileAttributes attr;
+ box_time_t attrModTime = 0;
+ attr.ReadAttributes(rLocalPath.c_str(), true /* directories have zero mod times */,
+ 0 /* no modification time */, &attrModTime);
+
+ // Assume attributes need updating, unless proved otherwise
+ bool updateAttr = true;
+
+ // Got a listing to compare with?
+ ASSERT(pDirOnStore == 0 || (pDirOnStore != 0 && pDirOnStore->HasAttributes()));
+ if(pDirOnStore != 0 && pDirOnStore->HasAttributes())
+ {
+ const StreamableMemBlock &storeAttrEnc(pDirOnStore->GetAttributes());
+		// Explicit decryption
+ BackupClientFileAttributes storeAttr(storeAttrEnc);
+ // Compare the attributes
+ if(attr.Compare(storeAttr, true, true /* ignore both modification times */))
+ {
+ // No update necessary
+ updateAttr = false;
+ }
+ }
+
+ // Update them?
+ if(updateAttr)
+ {
+ // Get connection to store
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Exception thrown if this doesn't work
+ MemBlockStream attrStream(attr);
+ connection.QueryChangeDirAttributes(mObjectID, attrModTime, attrStream);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncParams &, const std::string &, BackupStoreDirectory *, std::vector<BackupStoreDirectory::Entry *> &)
+// Purpose: Update the items stored on the server. The rFiles vector will be erased after it's used to save space.
+// Returns true if all items were updated successfully. (If not, the failures will have been logged).
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncParams &rParams,
+ const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+ std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
+ std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs)
+{
+ bool allUpdatedSuccessfully = true;
+
+ // Decrypt all the directory entries.
+ // It would be nice to be able to just compare the encrypted versions, however this doesn't work
+ // in practise because there can be multiple encodings of the same filename using different
+ // methods (although each method will result in the same string for the same filename.) This
+ // happens when the server fixes a broken store, and gives plain text generated filenames.
+ // So if we didn't do things like this, then you wouldn't be able to recover from bad things
+ // happening with the server.
+ DecryptedEntriesMap_t decryptedEntries;
+ if(pDirOnStore != 0)
+ {
+ BackupStoreDirectory::Iterator i(*pDirOnStore);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next()) != 0)
+ {
+ decryptedEntries[BackupStoreFilenameClear(en->GetName()).GetClearFilename()] = en;
+ }
+ }
+
+ // Do files
+ for(std::vector<std::string>::const_iterator f = rFiles.begin();
+ f != rFiles.end(); ++f)
+ {
+ // Filename of this file
+ std::string filename(rLocalPath + DIRECTORY_SEPARATOR + *f);
+
+ // Get relevant info about file
+ box_time_t modTime = 0;
+ uint64_t attributesHash = 0;
+ int64_t fileSize = 0;
+ ino_t inodeNum = 0;
+ bool hasMultipleHardLinks = true;
+ // BLOCK
+ {
+ // Stat the file
+ struct stat st;
+ if(::lstat(filename.c_str(), &st) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+
+ // Extract required data
+ modTime = FileModificationTime(st);
+ fileSize = st.st_size;
+ inodeNum = st.st_ino;
+ hasMultipleHardLinks = (st.st_nlink > 1);
+ attributesHash = BackupClientFileAttributes::GenerateAttributeHash(st, *f);
+ }
+
+ // See if it's in the listing (if we have one)
+ BackupStoreFilenameClear storeFilename(*f);
+ BackupStoreDirectory::Entry *en = 0;
+ int64_t latestObjectID = 0;
+ if(pDirOnStore != 0)
+ {
+ DecryptedEntriesMap_t::iterator i(decryptedEntries.find(*f));
+ if(i != decryptedEntries.end())
+ {
+ en = i->second;
+ latestObjectID = en->GetObjectID();
+ }
+ }
+
+ // Check that the entry which might have been found is in fact a file
+ if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) == 0))
+ {
+ // Directory exists in the place of this file -- sort it out
+ RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore, en->GetObjectID(), *f);
+ en = 0;
+ }
+
+ // Check for renaming?
+ if(pDirOnStore != 0 && en == 0)
+ {
+ // We now know...
+ // 1) File has just been added
+ // 2) It's not in the store
+
+ // Do we know about the inode number?
+ const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
+ int64_t renameObjectID = 0, renameInDirectory = 0;
+ if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
+ {
+ // Look up on the server to get the name, to build the local filename
+ std::string localPotentialOldName;
+ bool isDir = false;
+ bool isCurrentVersion = false;
+ box_time_t srvModTime = 0, srvAttributesHash = 0;
+ BackupStoreFilenameClear oldLeafname;
+ if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion, &srvModTime, &srvAttributesHash, &oldLeafname))
+ {
+ // Only interested if it's a file and the latest version
+ if(!isDir && isCurrentVersion)
+ {
+ // Check that the object we found in the ID map doesn't exist on disc
+ struct stat st;
+ if(::stat(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
+ {
+ // Doesn't exist locally, but does exist on the server.
+ // Therefore we can safely rename it to this new file.
+
+ // Get the connection to the server
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Only do this step if there is room on the server.
+ // This step will be repeated later when there is space available
+ if(!rParams.mrContext.StorageLimitExceeded())
+ {
+ // Rename the existing files (ie include old versions) on the server
+ connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
+ BackupProtocolClientMoveObject::Flags_MoveAllWithSameName | BackupProtocolClientMoveObject::Flags_AllowMoveOverDeletedObject,
+ storeFilename);
+
+ // Stop the attempt to delete the file in the original location
+ BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ rdelList.StopFileDeletion(renameInDirectory, oldLeafname);
+
+ // Create new entry in the directory for it
+ // -- will be near enough what's actually on the server for the rest to work.
+ en = pDirOnStore->AddEntry(storeFilename, srvModTime, renameObjectID, 0 /* size in blocks unknown, but not needed */,
+ BackupStoreDirectory::Entry::Flags_File, srvAttributesHash);
+
+ // Store the object ID for the inode lookup map later
+ latestObjectID = renameObjectID;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Is it in the mPendingEntries list?
+ box_time_t pendingFirstSeenTime = 0; // ie not seen
+ if(mpPendingEntries != 0)
+ {
+ std::map<std::string, box_time_t>::const_iterator i(mpPendingEntries->find(*f));
+ if(i != mpPendingEntries->end())
+ {
+ // found it -- set flag
+ pendingFirstSeenTime = i->second;
+ }
+ }
+
+ // If pDirOnStore == 0, then this must have been after an initial sync:
+ ASSERT(pDirOnStore != 0 || mInitialSyncDone);
+ // So, if pDirOnStore == 0, then we know that everything before syncPeriodStart
+ // is either on the server, or in the toupload list. If the directory had changed,
+ // we'd have got a directory listing.
+ //
+ // At this point, if (pDirOnStore == 0 && en == 0), we can assume it's on the server with a
+ // mod time < syncPeriodStart, or didn't exist before that time.
+ //
+ // But if en != 0, then we need to compare modification times to avoid uploading it again.
+
+ // Need to update?
+ //
+ // Condition for upload:
+	//		modification time within sync period
+ // if it's been seen before but not uploaded, is the time from this first sight longer than the MaxUploadWait
+ // and if we know about it from a directory listing, that it hasn't got the same upload time as on the store
+ if(
+ (
+ // Check the file modified within the acceptable time period we're checking
+ // If the file isn't on the server, the acceptable time starts at zero.
+ // Check pDirOnStore and en, because if we didn't download a directory listing,
+ // pDirOnStore will be zero, but we know it's on the server.
+ ( ((pDirOnStore != 0 && en == 0) || (modTime >= rParams.mSyncPeriodStart)) && modTime < rParams.mSyncPeriodEnd)
+
+ // However, just in case things are continually modified, we check the first seen time.
+ // The two compares of syncPeriodEnd and pendingFirstSeenTime are because the values are unsigned.
+ || (pendingFirstSeenTime != 0 &&
+ (rParams.mSyncPeriodEnd > pendingFirstSeenTime)
+ && ((rParams.mSyncPeriodEnd - pendingFirstSeenTime) > rParams.mMaxUploadWait))
+
+ // Then make sure that if files are added with a time less than the sync period start
+ // (which can easily happen on file server), it gets uploaded. The directory contents checksum
+ // will pick up the fact it has been added, so the store listing will be available when this happens.
+ || ((modTime <= rParams.mSyncPeriodStart) && (en != 0) && (en->GetModificationTime() != modTime))
+
+ // And just to catch really badly off clocks in the future for file server clients,
+ // just upload the file if it's madly in the future.
+ || (modTime > rParams.mUploadAfterThisTimeInTheFuture)
+ )
+ // But even then, only upload it if the mod time locally is different to that on the server.
+ && (en == 0 || en->GetModificationTime() != modTime))
+ {
+ // Make sure we're connected -- must connect here so we know whether
+ // the storage limit has been exceeded, and hence whether or not
+ // to actually upload the file.
+ rParams.mrContext.GetConnection();
+
+ // Only do this step if there is room on the server.
+ // This step will be repeated later when there is space available
+ if(!rParams.mrContext.StorageLimitExceeded())
+ {
+ // Upload the file to the server, recording the object ID it returns
+ bool noPreviousVersionOnServer = ((pDirOnStore != 0) && (en == 0));
+
+				// Surround this in a try/catch block, to catch errors, but still continue
+ bool uploadSuccess = false;
+ try
+ {
+ latestObjectID = UploadFile(rParams, filename, storeFilename, fileSize, modTime, attributesHash, noPreviousVersionOnServer);
+ uploadSuccess = true;
+ }
+ catch(ConnectionException &e)
+ {
+ // Connection errors should just be passed on to the main handler, retries
+ // would probably just cause more problems.
+ throw;
+ }
+ catch(BoxException &e)
+ {
+					// an error occurred -- make return code false, to show error in directory
+ allUpdatedSuccessfully = false;
+ // Log it.
+ SetErrorWhenReadingFilesystemObject(rParams, filename.c_str());
+ // Log error.
+ ::syslog(LOG_ERR, "Error code when uploading was (%d/%d), %s", e.GetType(), e.GetSubType(), e.what());
+ }
+
+ // Update structures if the file was uploaded successfully.
+ if(uploadSuccess)
+ {
+ // delete from pending entries
+ if(pendingFirstSeenTime != 0 && mpPendingEntries != 0)
+ {
+ mpPendingEntries->erase(*f);
+ }
+ }
+ }
+ }
+ else if(en != 0 && en->GetAttributesHash() != attributesHash)
+ {
+ // Attributes have probably changed, upload them again.
+ // If the attributes have changed enough, the directory hash will have changed too,
+ // and so the dir will have been downloaded, and the entry will be available.
+
+ // Get connection
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Only do this step if there is room on the server.
+ // This step will be repeated later when there is space available
+ if(!rParams.mrContext.StorageLimitExceeded())
+ {
+ // Update store
+ BackupClientFileAttributes attr;
+ attr.ReadAttributes(filename.c_str(), false /* put mod times in the attributes, please */);
+ MemBlockStream attrStream(attr);
+ connection.QuerySetReplacementFileAttributes(mObjectID, attributesHash, storeFilename, attrStream);
+ }
+ }
+
+ if(modTime >= rParams.mSyncPeriodEnd)
+ {
+ // Allocate?
+ if(mpPendingEntries == 0)
+ {
+ mpPendingEntries = new std::map<std::string, box_time_t>;
+ }
+ // Adding to mPendingEntries list
+ if(pendingFirstSeenTime == 0)
+ {
+ // Haven't seen this before -- add to list!
+ (*mpPendingEntries)[*f] = modTime;
+ }
+ }
+
+		// Zero the corresponding pointer in rEntriesLeftOver, if it refers to this entry
+ if(en != 0)
+ {
+ for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
+ {
+ if(rEntriesLeftOver[l] == en)
+ {
+ rEntriesLeftOver[l] = 0;
+ break;
+ }
+ }
+ }
+
+ // Does this file need an entry in the ID map?
+ if(fileSize >= rParams.mFileTrackingSizeThreshold)
+ {
+ // Get the map
+ BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
+
+ // Need to get an ID from somewhere...
+ if(latestObjectID != 0)
+ {
+ // Use this one
+ idMap.AddToMap(inodeNum, latestObjectID, mObjectID /* containing directory */);
+ }
+ else
+ {
+ // Don't know it -- haven't sent anything to the store, and didn't get a listing.
+ // Look it up in the current map, and if it's there, use that.
+ const BackupClientInodeToIDMap &currentIDMap(rParams.mrContext.GetCurrentIDMap());
+ int64_t objid = 0, dirid = 0;
+ if(currentIDMap.Lookup(inodeNum, objid, dirid))
+ {
+ // Found
+ ASSERT(dirid == mObjectID);
+ // NOTE: If the above assert fails, an inode number has been reused by the OS,
+ // or there is a problem somewhere. If this happened on a short test run, look
+ // into it. However, in a long running process this may happen occasionally and
+					// not indicate anything wrong.
+ // Run the release version for real life use, where this check is not made.
+ idMap.AddToMap(inodeNum, objid, mObjectID /* containing directory */);
+ }
+ }
+ }
+ }
+
+ // Erase contents of files to save space when recursing
+ rFiles.clear();
+
+	// Delete the pending entries, if the map is empty
+ if(mpPendingEntries != 0 && mpPendingEntries->size() == 0)
+ {
+ TRACE1("Deleting mpPendingEntries from dir ID %lld\n", mObjectID);
+ delete mpPendingEntries;
+ mpPendingEntries = 0;
+ }
+
+ // Do directories
+ for(std::vector<std::string>::const_iterator d = rDirs.begin();
+ d != rDirs.end(); ++d)
+ {
+ // Get the local filename
+ std::string dirname(rLocalPath + DIRECTORY_SEPARATOR + *d);
+
+ // See if it's in the listing (if we have one)
+ BackupStoreFilenameClear storeFilename(*d);
+ BackupStoreDirectory::Entry *en = 0;
+ if(pDirOnStore != 0)
+ {
+ DecryptedEntriesMap_t::iterator i(decryptedEntries.find(*d));
+ if(i != decryptedEntries.end())
+ {
+ en = i->second;
+ }
+ }
+
+ // Check that the entry which might have been found is in fact a directory
+ if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == 0))
+ {
+ // Entry exists, but is not a directory. Bad. Get rid of it.
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ connection.QueryDeleteFile(mObjectID /* in directory */, storeFilename);
+
+ // Nothing found
+ en = 0;
+ }
+
+		// Flag for having created directory, so can optimise the recursive call not to
+ // read it again, because we know it's empty.
+ bool haveJustCreatedDirOnServer = false;
+
+ // Next, see if it's in the list of sub directories
+ BackupClientDirectoryRecord *psubDirRecord = 0;
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(*d));
+ if(e != mSubDirectories.end())
+ {
+ // In the list, just use this pointer
+ psubDirRecord = e->second;
+ }
+ else if(!rParams.mrContext.StorageLimitExceeded()) // know we've got a connection if we get this far, as dir will have been modified.
+ {
+ // Note: only think about adding directory records if there's space left on the server.
+ // If there isn't, this step will be repeated when there is some available.
+
+ // Need to create the record. But do we need to create the directory on the server?
+ int64_t subDirObjectID = 0;
+ if(en != 0)
+ {
+ // No. Exists on the server, and we know about it from the listing.
+ subDirObjectID = en->GetObjectID();
+ }
+ else
+ {
+ // Yes, creation required!
+				// It is known that it doesn't exist:
+ // if pDirOnStore == 0, then the directory has had an initial sync, and hasn't been modified.
+				//  so it has definitely been created already.
+ // if en == 0 but pDirOnStore != 0, well... obviously it doesn't exist.
+
+ // Get attributes
+ box_time_t attrModTime = 0;
+ ino_t inodeNum = 0;
+ BackupClientFileAttributes attr;
+ attr.ReadAttributes(dirname.c_str(), true /* directories have zero mod times */,
+ 0 /* not interested in mod time */, &attrModTime, 0 /* not file size */,
+ &inodeNum);
+
+				// Check to see if the directory has been renamed
+ // First, do we have a record in the ID map?
+ int64_t renameObjectID = 0, renameInDirectory = 0;
+ bool renameDir = false;
+ const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
+ if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
+ {
+ // Look up on the server to get the name, to build the local filename
+ std::string localPotentialOldName;
+ bool isDir = false;
+ bool isCurrentVersion = false;
+ if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion))
+ {
+ // Only interested if it's a directory
+ if(isDir && isCurrentVersion)
+ {
+ // Check that the object doesn't exist already
+ struct stat st;
+ if(::stat(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
+ {
+ // Doesn't exist locally, but does exist on the server.
+ // Therefore we can safely rename it.
+ renameDir = true;
+ }
+ }
+ }
+ }
+
+ // Get connection
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Don't do a check for storage limit exceeded here, because if we get to this
+ // stage, a connection will have been opened, and the status known, so the check
+ // in the else if(...) above will be correct.
+
+ // Build attribute stream for sending
+ MemBlockStream attrStream(attr);
+
+ if(renameDir)
+ {
+ // Rename the existing directory on the server
+ connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
+ BackupProtocolClientMoveObject::Flags_MoveAllWithSameName | BackupProtocolClientMoveObject::Flags_AllowMoveOverDeletedObject,
+ storeFilename);
+
+ // Put the latest attributes on it
+ connection.QueryChangeDirAttributes(renameObjectID, attrModTime, attrStream);
+
+ // Stop it being deleted later
+ BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ rdelList.StopDirectoryDeletion(renameObjectID);
+
+ // This is the ID for the renamed directory
+ subDirObjectID = renameObjectID;
+ }
+ else
+ {
+ // Create a new directory
+ std::auto_ptr<BackupProtocolClientSuccess> dirCreate(connection.QueryCreateDirectory(
+ mObjectID, attrModTime, storeFilename, attrStream));
+ subDirObjectID = dirCreate->GetObjectID();
+
+ // Flag as having done this for optimisation later
+ haveJustCreatedDirOnServer = true;
+ }
+ }
+
+ // New an object for this
+ psubDirRecord = new BackupClientDirectoryRecord(subDirObjectID, *d);
+
+ // Store in list
+ try
+ {
+ mSubDirectories[*d] = psubDirRecord;
+ }
+ catch(...)
+ {
+ delete psubDirRecord;
+ psubDirRecord = 0;
+ throw;
+ }
+ }
+
+ ASSERT(psubDirRecord != 0 || rParams.mrContext.StorageLimitExceeded());
+
+ if(psubDirRecord)
+ {
+ // Sync this sub directory too
+ psubDirRecord->SyncDirectory(rParams, mObjectID, dirname, haveJustCreatedDirOnServer);
+ }
+
+ // Zero pointer in rEntriesLeftOver, if we have a pointer to zero
+ if(en != 0)
+ {
+ for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
+ {
+ if(rEntriesLeftOver[l] == en)
+ {
+ rEntriesLeftOver[l] = 0;
+ break;
+ }
+ }
+ }
+ }
+
+ // Delete everything which is on the store, but not on disc
+ for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
+ {
+ if(rEntriesLeftOver[l] != 0)
+ {
+ BackupStoreDirectory::Entry *en = rEntriesLeftOver[l];
+
+ // These entries can't be deleted immediately, as it would prevent
+ // renaming and moving of objects working properly. So we add them
+ // to a list, which is actually deleted at the very end of the session.
+ // If there's an error during the process, it doesn't matter if things
+ // aren't actually deleted, as the whole state will be reset anyway.
+ BackupClientDeleteList &rdel(rParams.mrContext.GetDeleteList());
+
+ // Delete this entry -- file or directory?
+ if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
+ {
+ // Set a pending deletion for the file
+ rdel.AddFileDelete(mObjectID, en->GetName());
+ }
+ else if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) != 0)
+ {
+ // Set as a pending deletion for the directory
+ rdel.AddDirectoryDelete(en->GetObjectID());
+
+ // If there's a directory record for it in the sub directory map, delete it now
+ BackupStoreFilenameClear dirname(en->GetName());
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(dirname.GetClearFilename()));
+ if(e != mSubDirectories.end())
+ {
+ // Carefully delete the entry from the map
+ BackupClientDirectoryRecord *rec = e->second;
+ mSubDirectories.erase(e);
+ delete rec;
+ TRACE2("Deleted directory record for %s/%s\n", rLocalPath.c_str(), dirname.GetClearFilename().c_str());
+ }
+ }
+ }
+ }
+
+ // Return success flag (will be false if some files failed)
+ return allUpdatedSuccessfully;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &, BackupStoreDirectory *, int64_t, const std::string &)
+//		Purpose: Called to resolve difficulties when a directory is found on the
+//			 store where a file is to be uploaded. Deletes the directory on
+//			 the store and discards any local record tracking it.
+//		Created: 9/7/04
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename)
+{
+	// NOTE(review): pDirOnStore is not used by this implementation -- confirm
+	// whether it is kept for interface symmetry or can be dropped.
+
+	// First, delete the directory on the store (ObjectID identifies it)
+	BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+	connection.QueryDeleteDirectory(ObjectID);
+
+	// Then, delete any directory record held for it locally, so this
+	// session won't try to sync a directory which no longer exists.
+	std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(rFilename));
+	if(e != mSubDirectories.end())
+	{
+		// A record exists for this, remove it from the map first so the
+		// map never holds a dangling pointer
+		BackupClientDirectoryRecord *psubDirRecord = e->second;
+		mSubDirectories.erase(e);
+
+		// And delete the object
+		delete psubDirRecord;
+	}
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &, const std::string &, const BackupStoreFilename &, int64_t, box_time_t, box_time_t, bool)
+//		Purpose: Private. Upload a file to the server -- may send a patch
+//			 instead of the whole thing if a previous version exists on
+//			 the store and the file is large enough to make diffing
+//			 worthwhile. Returns the object ID of the stored file.
+//		Created: 20/1/04
+//
+// --------------------------------------------------------------------------
+int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
+	int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer)
+{
+	// Get the connection
+	BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+	// Info -- objID stays 0 on the diff-upload path, where the server
+	// assigns the ID to the patched object itself
+	int64_t objID = 0;
+	bool doNormalUpload = true;
+
+	// Use a try block to catch store full errors
+	try
+	{
+		// Might an old version be on the server, and is the file size over the diffing threshold?
+		// (Diffing small files costs more than it saves.)
+		if(!NoPreviousVersionOnServer && FileSize >= rParams.mDiffingUploadSizeThreshold)
+		{
+			// YES -- try to do diff, if possible
+			// First, query the server to see if there's an old version available
+			std::auto_ptr<BackupProtocolClientSuccess> getBlockIndex(connection.QueryGetBlockIndexByName(mObjectID, rStoreFilename));
+			int64_t diffFromID = getBlockIndex->GetObjectID();
+
+			// Zero means no previous version was found on the store
+			if(diffFromID != 0)
+			{
+				// Found an old version -- get the index
+				std::auto_ptr<IOStream> blockIndexStream(connection.ReceiveStream());
+
+				// Diff the file against the old version's block index
+				bool isCompletelyDifferent = false;
+				std::auto_ptr<IOStream> patchStream(BackupStoreFile::EncodeFileDiff(rFilename.c_str(),
+					mObjectID, /* containing directory */
+					rStoreFilename, diffFromID, *blockIndexStream,
+					connection.GetTimeout(), 0 /* not interested in the modification time */, &isCompletelyDifferent));
+
+				// Upload the patch to the store. If the file turned out to be
+				// completely different, send 0 as the diff-from ID so the
+				// server stores it as a whole file rather than a patch.
+				std::auto_ptr<BackupProtocolClientSuccess> stored(connection.QueryStoreFile(mObjectID, ModificationTime,
+					AttributesHash, isCompletelyDifferent?(0):(diffFromID), rStoreFilename, *patchStream));
+
+				// Don't attempt to upload it again!
+				doNormalUpload = false;
+			}
+		}
+
+		if(doNormalUpload)
+		{
+			// below threshold or nothing to diff from, so upload whole
+
+			// Prepare to upload, getting a stream which will encode the file as we go along
+			std::auto_ptr<IOStream> upload(BackupStoreFile::EncodeFile(rFilename.c_str(), mObjectID, rStoreFilename));
+
+			// Send to store
+			std::auto_ptr<BackupProtocolClientSuccess> stored(connection.QueryStoreFile(mObjectID, ModificationTime,
+				AttributesHash, 0 /* no diff from file ID */, rStoreFilename, *upload));
+
+			// Get object ID from the result
+			objID = stored->GetObjectID();
+		}
+	}
+	catch(BoxException &e)
+	{
+		if(e.GetType() == ConnectionException::ExceptionType && e.GetSubType() == ConnectionException::Protocol_UnexpectedReply)
+		{
+			// Check and see what error the protocol has -- as it might be an error...
+			int type, subtype;
+			if(connection.GetLastError(type, subtype)
+				&& type == BackupProtocolClientError::ErrorType
+				&& subtype == BackupProtocolClientError::Err_StorageLimitExceeded)
+			{
+				// The hard limit was exceeded on the server, notify!
+				rParams.mrDaemon.NotifySysadmin(BackupDaemon::NotifyEvent_StoreFull);
+			}
+		}
+
+		// Send the error on its way -- callers handle the failure
+		throw;
+	}
+
+	// Return the new object ID of this file
+	return objID;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(SyncParams &, const char *)
+//		Purpose: Sets the error state when there were problems reading an object
+//			 from the filesystem. Invalidates this record's checksum and
+//			 flags the error in the sync parameters.
+//		Created: 29/3/04
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(BackupClientDirectoryRecord::SyncParams &rParams, const char *Filename)
+{
+	// Zero hash, so it gets synced properly next time round.
+	::memset(mStateChecksum, 0, sizeof(mStateChecksum));
+
+	// Log the error
+	::syslog(LOG_ERR, "Backup object failed, error when reading %s", Filename);
+
+	// Mark that an error occurred in the parameters object
+	rParams.mReadErrorsOnFilesystemObjects = true;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientDirectoryRecord::SyncParams::SyncParams(BackupClientContext &)
+//		Purpose: Constructor. Sets conservative defaults; the daemon
+//			 overwrites the timing/threshold members before a sync run.
+//		Created: 8/3/04
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::SyncParams::SyncParams(BackupDaemon &rDaemon, BackupClientContext &rContext)
+	: mSyncPeriodStart(0),
+	  mSyncPeriodEnd(0),
+	  mMaxUploadWait(0),
+	  mMaxFileTimeInFuture(99999999999999999LL),	// effectively "no limit" sentinel
+	  mFileTrackingSizeThreshold(16*1024),
+	  mDiffingUploadSizeThreshold(16*1024),
+	  mrDaemon(rDaemon),
+	  mrContext(rContext),
+	  mReadErrorsOnFilesystemObjects(false),
+	  mUploadAfterThisTimeInTheFuture(99999999999999999LL),	// same sentinel: nothing scheduled
+	  mHaveLoggedWarningAboutFutureFileTimes(false)
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientDirectoryRecord::SyncParams::~SyncParams()
+//		Purpose: Destructor. Nothing to release -- the referenced daemon and
+//			 context are owned elsewhere.
+//		Created: 8/3/04
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::SyncParams::~SyncParams()
+{
+}
+
+
+
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.h b/bin/bbackupd/BackupClientDirectoryRecord.h
new file mode 100755
index 00000000..99354bc8
--- /dev/null
+++ b/bin/bbackupd/BackupClientDirectoryRecord.h
@@ -0,0 +1,115 @@
+// --------------------------------------------------------------------------
+//
+// File
+//		Name:    BackupClientDirectoryRecord.h
+//		Purpose: Implementation of record about directory for backup client
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTDIRECTORYRECORD__H
+#define BACKUPCLIENTDIRECTORYRECORD__H
+
+#include <string>
+#include <map>
+
+#include "BoxTime.h"
+#include "BackupClientFileAttributes.h"
+#include "BackupStoreDirectory.h"
+#include "MD5Digest.h"
+
+class BackupClientContext;
+class BackupDaemon;
+
+// --------------------------------------------------------------------------
+//
+// Class
+//		Name:    BackupClientDirectoryRecord
+//		Purpose: Implementation of record about directory for backup client.
+//			 Each record mirrors one local directory and knows its object
+//			 ID on the store; records form a tree via mSubDirectories.
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+class BackupClientDirectoryRecord
+{
+public:
+	BackupClientDirectoryRecord(int64_t ObjectID, const std::string &rSubDirName);
+	~BackupClientDirectoryRecord();
+private:
+	// No copying -- records own their sub-directory records
+	BackupClientDirectoryRecord(const BackupClientDirectoryRecord &);
+public:
+
+	enum
+	{
+		// Object ID used before the directory has been seen on the store
+		UnknownDirectoryID = 0
+	};
+
+	// --------------------------------------------------------------------------
+	//
+	// Class
+	//		Name:    BackupClientDirectoryRecord::SyncParams
+	//		Purpose: Holds parameters etc for directory syncing. Not passed as
+	//			 const, some parameters may be modified during sync.
+	//		Created: 8/3/04
+	//
+	// --------------------------------------------------------------------------
+	class SyncParams
+	{
+	public:
+		SyncParams(BackupDaemon &rDaemon, BackupClientContext &rContext);
+		~SyncParams();
+	private:
+		// No copying
+		SyncParams(const SyncParams&);
+		SyncParams &operator=(const SyncParams&);
+	public:
+
+		// Data members are public, as accessors are not justified here
+		box_time_t mSyncPeriodStart;
+		box_time_t mSyncPeriodEnd;
+		box_time_t mMaxUploadWait;
+		box_time_t mMaxFileTimeInFuture;
+		int32_t mFileTrackingSizeThreshold;
+		int32_t mDiffingUploadSizeThreshold;
+		BackupDaemon &mrDaemon;
+		BackupClientContext &mrContext;
+		bool mReadErrorsOnFilesystemObjects;
+
+		// Member variables modified by syncing process
+		box_time_t mUploadAfterThisTimeInTheFuture;
+		bool mHaveLoggedWarningAboutFutureFileTimes;
+	};
+
+	void SyncDirectory(SyncParams &rParams, int64_t ContainingDirectoryID, const std::string &rLocalPath,
+		bool ThisDirHasJustBeenCreated = false);
+
+private:
+	void DeleteSubDirectories();
+	BackupStoreDirectory *FetchDirectoryListing(SyncParams &rParams);
+	void UpdateAttributes(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, const std::string &rLocalPath);
+	bool UpdateItems(SyncParams &rParams, const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+		std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
+		std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs);
+	int64_t UploadFile(SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
+		int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer);
+	void SetErrorWhenReadingFilesystemObject(SyncParams &rParams, const char *Filename);
+	void RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename);
+
+private:
+	int64_t mObjectID;
+	std::string mSubDirName;
+	bool mInitialSyncDone;
+	bool mSyncDone;
+
+	// Checksum of directory contents and attributes, used to detect changes
+	uint8_t mStateChecksum[MD5Digest::DigestLength];
+
+	std::map<std::string, box_time_t> *mpPendingEntries;
+	std::map<std::string, BackupClientDirectoryRecord *> mSubDirectories;
+	// mpPendingEntries is a pointer rather than simply a member
+	// variable, because most of the time it'll be empty. This would waste a lot
+	// of memory because of STL allocation policies.
+};
+
+#endif // BACKUPCLIENTDIRECTORYRECORD__H
+
+
+
diff --git a/bin/bbackupd/BackupClientInodeToIDMap.cpp b/bin/bbackupd/BackupClientInodeToIDMap.cpp
new file mode 100755
index 00000000..23e91eba
--- /dev/null
+++ b/bin/bbackupd/BackupClientInodeToIDMap.cpp
@@ -0,0 +1,279 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientInodeToIDMap.cpp
+// Purpose: Map of inode numbers to file IDs on the store
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#ifndef PLATFORM_BERKELEY_DB_NOT_SUPPORTED
+ // Include db headers and other OS files if they're needed for the disc implementation
+ #include <sys/types.h>
+ #include <fcntl.h>
+ #include <limits.h>
+ #ifdef PLATFORM_LINUX
+ #include "../../local/_linux_db.h"
+ #else
+ #include <db.h>
+ #endif
+ #include <sys/stat.h>
+#endif
+
+#define BACKIPCLIENTINODETOIDMAP_IMPLEMENTATION
+#include "BackupClientInodeToIDMap.h"
+
+#include "BackupStoreException.h"
+
+
+#include "MemLeakFindOn.h"
+
+// What type of Berkeley DB shall we use?
+#define TABLE_DATABASE_TYPE DB_HASH
+
+// On-disc record stored as the DBT value for each inode key.
+// Copied in/out with memcpy, as DB gives no alignment guarantees.
+typedef struct
+{
+	int64_t mObjectID;
+	int64_t mInDirectory;
+} IDBRecord;
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientInodeToIDMap::BackupClientInodeToIDMap()
+//		Purpose: Constructor. Map starts closed; call Open() or OpenEmpty()
+//			 before use.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientInodeToIDMap::BackupClientInodeToIDMap()
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	: mReadOnly(true),
+	  mEmpty(false),
+	  dbp(0)
+#endif
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientInodeToIDMap::~BackupClientInodeToIDMap()
+//		Purpose: Destructor. Closes the database file if still open;
+//			 close errors are deliberately ignored here, as destructors
+//			 must not throw.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientInodeToIDMap::~BackupClientInodeToIDMap()
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	if(dbp != 0)
+	{
+		dbp->close(dbp);
+	}
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientInodeToIDMap::Open(const char *, bool, bool)
+//		Purpose: Open the database map, creating a file on disc to store everything.
+//			 Throws BackupStoreException(BerkelyDBFailure) if the file
+//			 cannot be opened.
+//		Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::Open(const char *Filename, bool ReadOnly, bool CreateNew)
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	// Correct arguments? (creating a new file read-only makes no sense)
+	ASSERT(!(CreateNew && ReadOnly));
+
+	// Correct usage? (must not already be open, or opened empty)
+	ASSERT(dbp == 0);
+	ASSERT(!mEmpty);
+
+	// Open the database file using the BSD dbopen() interface
+	dbp = dbopen(Filename, (CreateNew?O_CREAT:0) | (ReadOnly?O_RDONLY:O_RDWR), S_IRUSR | S_IWUSR | S_IRGRP, TABLE_DATABASE_TYPE, NULL);
+	if(dbp == NULL)
+	{
+		THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+	}
+
+	// Read only flag
+	mReadOnly = ReadOnly;
+#endif
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientInodeToIDMap::OpenEmpty()
+//		Purpose: 'Open' this map. Not associated with a disc file. Useful for when a map
+//			 is required, but is against an empty file on disc which shouldn't be created.
+//			 Implies read only: all Lookup()s return false, AddToMap() throws.
+//		Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::OpenEmpty()
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	ASSERT(dbp == 0);
+	mEmpty = true;
+	mReadOnly = true;
+#endif
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientInodeToIDMap::Close()
+//		Purpose: Close the database file. Throws on close failure.
+//		Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::Close()
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	if(dbp != 0)
+	{
+		if(dbp->close(dbp) != 0)
+		{
+			// NOTE(review): on failure dbp is not reset, so the destructor
+			// will call close() again on the same handle -- confirm that
+			// a second close after a failed one is safe with this DB API.
+			THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+		}
+		dbp = 0;
+	}
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientInodeToIDMap::AddToMap(InodeRefType, int64_t, int64_t)
+//		Purpose: Adds an entry to the map. Overwrites any existing entry.
+//			 Throws if the map is read only or not open.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::AddToMap(InodeRefType InodeRef, int64_t ObjectID, int64_t InDirectory)
+{
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	// In-memory implementation: std::map assignment replaces any existing entry
+	mMap[InodeRef] = std::pair<int64_t, int64_t>(ObjectID, InDirectory);
+#else
+	if(mReadOnly)
+	{
+		THROW_EXCEPTION(BackupStoreException, InodeMapIsReadOnly);
+	}
+
+	if(dbp == 0)
+	{
+		THROW_EXCEPTION(BackupStoreException, InodeMapNotOpen);
+	}
+
+	// Setup structures: key is the raw inode number, value is an IDBRecord
+	IDBRecord rec;
+	rec.mObjectID = ObjectID;
+	rec.mInDirectory = InDirectory;
+
+	DBT key;
+	key.data = &InodeRef;
+	key.size = sizeof(InodeRef);
+
+	DBT data;
+	data.data = &rec;
+	data.size = sizeof(rec);
+
+	// Add to map (or replace existing entry -- flags of 0 means overwrite)
+	if(dbp->put(dbp, &key, &data, 0) != 0)
+	{
+		THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+	}
+#endif
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientInodeToIDMap::Lookup(InodeRefType, int64_t &, int64_t &) const
+//		Purpose: Looks up an inode in the map, returning true if it exists, and the object
+//			 ids of it and the directory it's in the reference arguments.
+//			 Output arguments are only written when true is returned.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+bool BackupClientInodeToIDMap::Lookup(InodeRefType InodeRef, int64_t &rObjectIDOut, int64_t &rInDirectoryOut) const
+{
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	// In-memory implementation: plain std::map lookup
+	std::map<InodeRefType, std::pair<int64_t, int64_t> >::const_iterator i(mMap.find(InodeRef));
+
+	// Found?
+	if(i == mMap.end())
+	{
+		return false;
+	}
+
+	// Yes. Return the details
+	rObjectIDOut = i->second.first;
+	rInDirectoryOut = i->second.second;
+	return true;
+#else
+	if(mEmpty)
+	{
+		// Map is empty (OpenEmpty() was used) -- nothing can ever match
+		return false;
+	}
+
+	if(dbp == 0)
+	{
+		THROW_EXCEPTION(BackupStoreException, InodeMapNotOpen);
+	}
+
+	DBT key;
+	key.data = &InodeRef;
+	key.size = sizeof(InodeRef);
+
+	DBT data;
+	data.data = 0;
+	data.size = 0;
+
+	// get() return values follow the dbopen() convention
+	switch(dbp->get(dbp, &key, &data, 0))
+	{
+	case 1:	// key not in file
+		return false;
+
+	case -1:	// error
+	default:	// not specified in docs
+		THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+		return false;
+
+	case 0:	// success, found it
+		break;
+	}
+
+	// Check for sensible return
+	if(key.data == 0 || data.size != sizeof(IDBRecord))
+	{
+		// Assert in debug version
+		// NOTE(review): this ASSERT restates the condition that is already
+		// known true on this branch, so it can never fire -- ASSERT(false)
+		// appears to have been intended. Confirm before changing.
+		ASSERT(key.data == 0 || data.size != sizeof(IDBRecord));
+
+		// Invalid entries mean it wasn't found
+		return false;
+	}
+
+	// Data alignment isn't guaranteed to be on a suitable boundary,
+	// so copy the record out rather than casting the pointer
+	IDBRecord rec;
+	::memcpy(&rec, data.data, sizeof(rec));
+
+	// Return data
+	rObjectIDOut = rec.mObjectID;
+	rInDirectoryOut = rec.mInDirectory;
+
+	// Don't have to worry about freeing the returned data
+
+	// Found
+	return true;
+#endif
+}
+
+
diff --git a/bin/bbackupd/BackupClientInodeToIDMap.h b/bin/bbackupd/BackupClientInodeToIDMap.h
new file mode 100755
index 00000000..1ea7755d
--- /dev/null
+++ b/bin/bbackupd/BackupClientInodeToIDMap.h
@@ -0,0 +1,67 @@
+// --------------------------------------------------------------------------
+//
+// File
+//		Name:    BackupClientInodeToIDMap.h
+//		Purpose: Map of inode numbers to file IDs on the store
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+
+// Fix: the guard previously tested BACKUPCLIENTINODETOIDMAP_H (single
+// underscore) but defined BACKUPCLIENTINODETOIDMAP__H, so the tested macro
+// was never defined and repeated inclusion would redefine the class.
+#ifndef BACKUPCLIENTINODETOIDMAP__H
+#define BACKUPCLIENTINODETOIDMAP__H
+
+#include <sys/types.h>
+
+#include <map>
+#include <utility>
+
+// Use in memory implementation if there isn't access to the Berkely DB on this platform
+#ifdef PLATFORM_BERKELEY_DB_NOT_SUPPORTED
+	#define BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+#endif
+
+typedef ino_t InodeRefType;
+
+// avoid having to include the DB files when not necessary
+#ifndef BACKIPCLIENTINODETOIDMAP_IMPLEMENTATION
+	class DB;
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Class
+//		Name:    BackupClientInodeToIDMap
+//		Purpose: Map of inode numbers to file IDs on the store. Backed by a
+//			 Berkeley DB file on disc, or a std::map where the DB is
+//			 unavailable on the platform.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+class BackupClientInodeToIDMap
+{
+public:
+	BackupClientInodeToIDMap();
+	~BackupClientInodeToIDMap();
+private:
+	BackupClientInodeToIDMap(const BackupClientInodeToIDMap &rToCopy);	// not allowed
+public:
+
+	void Open(const char *Filename, bool ReadOnly, bool CreateNew);
+	void OpenEmpty();
+
+	void AddToMap(InodeRefType InodeRef, int64_t ObjectID, int64_t InDirectory);
+	bool Lookup(InodeRefType InodeRef, int64_t &rObjectIDOut, int64_t &rInDirectoryOut) const;
+
+	void Close();
+
+private:
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	std::map<InodeRefType, std::pair<int64_t, int64_t> > mMap;
+#else
+	bool mReadOnly;
+	bool mEmpty;
+	DB *dbp;	// C style interface, use notation from documentation
+#endif
+};
+
+#endif // BACKUPCLIENTINODETOIDMAP__H
+
+
diff --git a/bin/bbackupd/BackupDaemon.cpp b/bin/bbackupd/BackupDaemon.cpp
new file mode 100755
index 00000000..7aa21a87
--- /dev/null
+++ b/bin/bbackupd/BackupDaemon.cpp
@@ -0,0 +1,1624 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupDaemon.cpp
+// Purpose: Backup daemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <unistd.h>
+#include <syslog.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <signal.h>
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+ #include <mntent.h>
+#endif
+#include <sys/wait.h>
+
+#include "Configuration.h"
+#include "IOStream.h"
+#include "MemBlockStream.h"
+#include "CommonException.h"
+
+#include "SSLLib.h"
+#include "TLSContext.h"
+
+#include "BackupDaemon.h"
+#include "BackupDaemonConfigVerify.h"
+#include "BackupClientContext.h"
+#include "BackupClientDirectoryRecord.h"
+#include "BackupStoreDirectory.h"
+#include "BackupClientFileAttributes.h"
+#include "BackupStoreFilenameClear.h"
+#include "BackupClientInodeToIDMap.h"
+#include "autogen_BackupProtocolClient.h"
+#include "BackupClientCryptoKeys.h"
+#include "BannerText.h"
+#include "BackupStoreFile.h"
+#include "Random.h"
+#include "ExcludeList.h"
+#include "BackupClientMakeExcludeList.h"
+#include "IOStreamGetLine.h"
+#include "Utils.h"
+#include "FileStream.h"
+#include "BackupStoreException.h"
+#include "BackupStoreConstants.h"
+#include "LocalProcessStream.h"
+#include "IOStreamGetLine.h"
+#include "Conversion.h"
+
+#include "MemLeakFindOn.h"
+
+#define MAX_SLEEP_TIME ((unsigned int)1024)
+
+// Make the actual sync period have a little bit of extra time, up to a 64th of the main sync period.
+// This prevents repetative cycles of load on the server
+#define SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY 6
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::BackupDaemon()
+//		Purpose: constructor
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupDaemon::BackupDaemon()
+	: mState(BackupDaemon::State_Initialising),
+	  mpCommandSocketInfo(0),
+	  mDeleteUnusedRootDirEntriesAfter(0)
+{
+	// Only ever one instance of a daemon
+	SSLLib::Initialise();
+
+	// Initialise notification sent status
+	// (inclusive bound: assumes mNotificationsSent has NotifyEvent__MAX + 1
+	// elements -- confirm against the array declaration in the header)
+	for(int l = 0; l <= NotifyEvent__MAX; ++l)
+	{
+		mNotificationsSent[l] = false;
+	}
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::~BackupDaemon()
+//		Purpose: Destructor. Releases location records, ID maps and the
+//			 command socket info, if allocated.
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupDaemon::~BackupDaemon()
+{
+	DeleteAllLocations();
+	DeleteAllIDMaps();
+
+	if(mpCommandSocketInfo != 0)
+	{
+		delete mpCommandSocketInfo;
+		mpCommandSocketInfo = 0;
+	}
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::DaemonName()
+//		Purpose: Get name of daemon (used for syslog identity etc.)
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+const char *BackupDaemon::DaemonName() const
+{
+	return "bbackupd";
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::DaemonBanner()
+//		Purpose: Daemon banner. Returns 0 (no banner) in debug builds.
+//		Created: 1/1/04
+//
+// --------------------------------------------------------------------------
+const char *BackupDaemon::DaemonBanner() const
+{
+#ifndef NDEBUG
+	// Don't display banner in debug builds
+	return 0;
+#else
+	return BANNER_TEXT("Backup Client");
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::GetConfigVerify()
+//		Purpose: Get configuration specification
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+const ConfigurationVerify *BackupDaemon::GetConfigVerify() const
+{
+	// Defined elsewhere (BackupDaemonConfigVerify.cpp, autogenerated)
+	return &BackupDaemonConfigVerify;
+}
+
+#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::SetupInInitialProcess()
+//		Purpose: Platforms with non-checkable credentials on local sockets only.
+//			 Prints a warning if the command socket is used.
+//		Created: 25/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetupInInitialProcess()
+{
+	// Print a warning on this platform if the CommandSocket is used.
+	if(GetConfiguration().KeyExists("CommandSocket"))
+	{
+		printf(
+				"============================================================================================\n" \
+				"SECURITY WARNING: This platform cannot check the credentials of connections to the\n" \
+				"command socket. This is a potential DoS security problem.\n" \
+				"Remove the CommandSocket directive from the bbackupd.conf file if bbackupctl is not used.\n" \
+				"============================================================================================\n"
+			);
+	}
+}
+#endif
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::DeleteAllLocations()
+//		Purpose: Deletes all records stored
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteAllLocations()
+{
+	// Run through, and delete everything (Location objects are owned
+	// by this vector)
+	for(std::vector<Location *>::iterator i = mLocations.begin();
+		i != mLocations.end(); ++i)
+	{
+		delete *i;
+	}
+
+	// Clear the contents of the map, so it is empty
+	mLocations.clear();
+
+	// And delete everything from the associated mount vector
+	mIDMapMounts.clear();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::Run()
+//		Purpose: Run function for daemon. Sets up the command socket (if
+//			 configured) and delegates to Run2(), guaranteeing the
+//			 socket info is released whether Run2() returns or throws.
+//		Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::Run()
+{
+	// Ignore SIGPIPE (so that if a command connection is broken, the daemon doesn't terminate)
+	::signal(SIGPIPE, SIG_IGN);
+
+	// Create a command socket?
+	const Configuration &conf(GetConfiguration());
+	if(conf.KeyExists("CommandSocket"))
+	{
+		// Yes, create a local UNIX socket
+		mpCommandSocketInfo = new CommandSocketInfo;
+		const char *socketName = conf.GetKeyValue("CommandSocket").c_str();
+		// Remove any stale socket file left by a previous run before listening
+		::unlink(socketName);
+		mpCommandSocketInfo->mListeningSocket.Listen(Socket::TypeUNIX, socketName);
+	}
+
+	// Handle things nicely on exceptions
+	// (cleanup is duplicated in the catch block and below because the
+	// exception is rethrown -- a RAII holder would remove the duplication)
+	try
+	{
+		Run2();
+	}
+	catch(...)
+	{
+		if(mpCommandSocketInfo != 0)
+		{
+			delete mpCommandSocketInfo;
+			mpCommandSocketInfo = 0;
+		}
+
+		throw;
+	}
+
+	// Clean up
+	if(mpCommandSocketInfo != 0)
+	{
+		delete mpCommandSocketInfo;
+		mpCommandSocketInfo = 0;
+	}
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::Run2()
+// Purpose: Run function for daemon (second stage)
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::Run2()
+{
+	// Read in the certificates creating a TLS context
+	TLSContext tlsContext;
+	const Configuration &conf(GetConfiguration());
+	std::string certFile(conf.GetKeyValue("CertificateFile"));
+	std::string keyFile(conf.GetKeyValue("PrivateKeyFile"));
+	std::string caFile(conf.GetKeyValue("TrustedCAsFile"));
+	tlsContext.Initialise(false /* as client */, certFile.c_str(), keyFile.c_str(), caFile.c_str());
+	
+	// Set up the keys for various things
+	BackupClientCryptoKeys_Setup(conf.GetKeyValue("KeysFile").c_str());
+
+	// Set maximum diffing time?
+	if(conf.KeyExists("MaximumDiffingTime"))
+	{
+		BackupStoreFile::SetMaximumDiffingTime(conf.GetKeyValueInt("MaximumDiffingTime"));
+	}
+	
+	// Setup various timings
+	
+	// How often to connect to the store (approximate)
+	box_time_t updateStoreInterval = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("UpdateStoreInterval"));
+
+	// But are we connecting automatically?
+	bool automaticBackup = conf.GetKeyValueBool("AutomaticBackup");
+	
+	// The minimum age a file needs to be before it will be considered for uploading
+	box_time_t minimumFileAge = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("MinimumFileAge"));
+
+	// The maximum time we'll wait to upload a file, regardless of how often it's modified
+	box_time_t maxUploadWait = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("MaxUploadWait"));
+	// Adjust by subtracting the minimum file age, so is relative to sync period end in comparisons
+	maxUploadWait = (maxUploadWait > minimumFileAge)?(maxUploadWait - minimumFileAge):(0);
+
+	// When the next sync should take place -- which is ASAP
+	box_time_t nextSyncTime = 0;
+
+	// When the last sync started (only updated if the store was not full when the sync ended)
+	box_time_t lastSyncTime = 0;
+
+	// --------------------------------------------------------------------------------------------
+	
+	// And what's the current client store marker?
+	int64_t clientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;		// haven't contacted the store yet
+
+	// Set state
+	SetState(State_Idle);
+
+	// Loop around doing backups
+	do
+	{
+		// Flags used below
+		bool storageLimitExceeded = false;
+		bool doSync = false;
+		bool doSyncForcedByCommand = false;
+
+		// Is a delay necessary?
+		{
+			box_time_t currentTime;
+			do
+			{
+				// Need to check the stop run thing here too, so this loop isn't run if we should be stopping
+				if(StopRun()) break;
+				
+				currentTime = GetCurrentBoxTime();
+	
+				// Pause a while, but no more than MAX_SLEEP_TIME seconds (use the conditional because times are unsigned)
+				box_time_t requiredDelay = (nextSyncTime < currentTime)?(0):(nextSyncTime - currentTime);
+				// If there isn't automatic backup happening, set a long delay. And limit delays at the same time.
+				if(!automaticBackup || requiredDelay > SecondsToBoxTime((uint32_t)MAX_SLEEP_TIME)) requiredDelay = SecondsToBoxTime((uint32_t)MAX_SLEEP_TIME);
+
+				// Only do the delay if there is a delay required
+				if(requiredDelay > 0)
+				{
+					// Sleep somehow. There are choices on how this should be done, depending on the state of the control connection
+					if(mpCommandSocketInfo != 0)
+					{
+						// A command socket exists, so sleep by handling connections with it
+						WaitOnCommandSocket(requiredDelay, doSync, doSyncForcedByCommand);
+					}
+					else
+					{
+						// No command socket or connection, just do a normal sleep
+						int sleepSeconds = BoxTimeToSeconds(requiredDelay);
+						::sleep((sleepSeconds <= 0)?1:sleepSeconds);
+					}
+				}
+				
+			} while((!automaticBackup || (currentTime < nextSyncTime)) && !doSync && !StopRun());
+		}
+
+		// Time of sync start, and if it's time for another sync (and we're doing automatic syncs), set the flag
+		box_time_t currentSyncStartTime = GetCurrentBoxTime();
+		if(automaticBackup && currentSyncStartTime >= nextSyncTime)
+		{
+			doSync = true;
+		}
+		
+		// Use a script to see if sync is allowed now?
+		// (skipped if the sync was forced over the command socket)
+		if(!doSyncForcedByCommand && doSync && !StopRun())
+		{
+			int d = UseScriptToSeeIfSyncAllowed();
+			if(d > 0)
+			{
+				// Script has asked for a delay
+				nextSyncTime = GetCurrentBoxTime() + SecondsToBoxTime((uint32_t)d);
+				doSync = false;
+			}
+		}
+
+		// Ready to sync? (but only if we're not supposed to be stopping)
+		if(doSync && !StopRun())
+		{
+			// Touch a file to record times in filesystem
+			TouchFileInWorkingDir("last_sync_start");
+		
+			// Tell anything connected to the command socket
+			SendSyncStartOrFinish(true /* start */);
+			
+			// Reset statistics on uploads
+			BackupStoreFile::ResetStats();
+			
+			// Calculate the sync period of files to examine
+			box_time_t syncPeriodStart = lastSyncTime;
+			box_time_t syncPeriodEnd = currentSyncStartTime - minimumFileAge;
+			// Check logic
+			ASSERT(syncPeriodEnd > syncPeriodStart);
+			// Paranoid check on sync times
+			if(syncPeriodStart >= syncPeriodEnd) continue;
+			
+			// Adjust syncPeriodEnd to emulate snapshot behaviour properly
+			box_time_t syncPeriodEndExtended = syncPeriodEnd;
+			// Using zero min file age?
+			if(minimumFileAge == 0)
+			{
+				// Add a year on to the end of the end time, to make sure we sync
+				// files which are modified after the scan run started.
+				// Of course, they may be eligible to be synced again the next time round,
+				// but this should be OK, because the changes only upload should upload no data.
+				syncPeriodEndExtended += SecondsToBoxTime((uint32_t)(365*24*3600));
+			}
+			
+			// Do sync
+			bool errorOccurred = false;
+			int errorCode = 0, errorSubCode = 0;
+			try
+			{
+				// Set state and log start
+				SetState(State_Connected);
+				::syslog(LOG_INFO, "Beginning scan of local files");
+
+				// Then create a client context object (don't just connect, as this may be unnecessary)
+				BackupClientContext clientContext(*this, tlsContext, conf.GetKeyValue("StoreHostname"),
+					conf.GetKeyValueInt("AccountNumber"), conf.GetKeyValueBool("ExtendedLogging"));
+					
+				// Set up the sync parameters
+				BackupClientDirectoryRecord::SyncParams params(*this, clientContext);
+				params.mSyncPeriodStart = syncPeriodStart;
+				params.mSyncPeriodEnd = syncPeriodEndExtended;		// use potentially extended end time
+				params.mMaxUploadWait = maxUploadWait;
+				params.mFileTrackingSizeThreshold = conf.GetKeyValueInt("FileTrackingSizeThreshold");
+				params.mDiffingUploadSizeThreshold = conf.GetKeyValueInt("DiffingUploadSizeThreshold");
+				params.mMaxFileTimeInFuture = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("MaxFileTimeInFuture"));
+				
+				// Set store marker
+				clientContext.SetClientStoreMarker(clientStoreMarker);
+				
+				// Set up the locations, if necessary -- need to do it here so we have a (potential) connection to use
+				if(mLocations.empty())
+				{
+					const Configuration &locations(conf.GetSubConfiguration("BackupLocations"));
+					
+					// Make sure all the directory records are set up
+					SetupLocations(clientContext, locations);
+				}
+				
+				// Get some ID maps going
+				SetupIDMapsForSync();
+
+				// Delete any unused directories?
+				DeleteUnusedRootDirEntries(clientContext);
+								
+				// Go through the records, syncing them
+				for(std::vector<Location *>::const_iterator i(mLocations.begin()); i != mLocations.end(); ++i)
+				{
+					// Set current and new ID map pointers in the context
+					clientContext.SetIDMaps(mCurrentIDMaps[(*i)->mIDMapIndex], mNewIDMaps[(*i)->mIDMapIndex]);
+				
+					// Set exclude lists (context doesn't take ownership)
+					clientContext.SetExcludeLists((*i)->mpExcludeFiles, (*i)->mpExcludeDirs);
+
+					// Sync the directory
+					(*i)->mpDirectoryRecord->SyncDirectory(params, BackupProtocolClientListDirectory::RootDirectory, (*i)->mPath);
+
+					// Unset exclude lists (just in case)
+					clientContext.SetExcludeLists(0, 0);
+				}
+				
+				// Errors reading any files?
+				if(params.mReadErrorsOnFilesystemObjects)
+				{
+					// Notify administrator
+					NotifySysadmin(NotifyEvent_ReadError);
+				}
+				else
+				{
+					// Unset the read error flag, so the error is
+					// reported again in the future
+					mNotificationsSent[NotifyEvent_ReadError] = false;
+				}
+				
+				// Perform any deletions required -- these are delayed until the end
+				// to allow renaming to happen neatly.
+				clientContext.PerformDeletions();
+
+				// Close any open connection
+				clientContext.CloseAnyOpenConnection();
+				
+				// Get the new store marker
+				clientStoreMarker = clientContext.GetClientStoreMarker();
+				
+				// Check the storage limit -- record the result in the flag so
+				// that the state set at the bottom of the loop reflects a full
+				// store (previously the flag was never assigned, so the daemon
+				// could never enter State_StorageLimitExceeded).
+				storageLimitExceeded = clientContext.StorageLimitExceeded();
+				if(storageLimitExceeded)
+				{
+					// Tell the sysadmin about this
+					NotifySysadmin(NotifyEvent_StoreFull);
+				}
+				else
+				{
+					// The start time of the next run is the end time of this run
+					// This is only done if the storage limit wasn't exceeded (as things won't have been done properly if it was)
+					lastSyncTime = syncPeriodEnd;
+					// unflag the storage full notify flag so that next time the store is full, an alert will be sent
+					mNotificationsSent[NotifyEvent_StoreFull] = false;
+				}
+				
+				// Calculate when the next sync run should be
+				nextSyncTime = currentSyncStartTime + updateStoreInterval + Random::RandomInt(updateStoreInterval >> SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
+			
+				// Commit the ID Maps
+				CommitIDMapsAfterSync();
+
+				// Log
+				::syslog(LOG_INFO, "Finished scan of local files");
+			}
+			catch(BoxException &e)
+			{
+				errorOccurred = true;
+				errorCode = e.GetType();
+				errorSubCode = e.GetSubType();
+			}
+			catch(...)
+			{
+				// TODO: better handling of exceptions here... need to be very careful
+				errorOccurred = true;
+			}
+			
+			if(errorOccurred)
+			{
+				// Is it a berkely db failure?
+				bool isBerkelyDbFailure = (errorCode == BackupStoreException::ExceptionType
+					&& errorSubCode == BackupStoreException::BerkelyDBFailure);
+				if(isBerkelyDbFailure)
+				{
+					// Delete corrupt files
+					DeleteCorruptBerkelyDbFiles();
+				}
+
+				// Clear state data
+				// The next scan must examine everything again, as the store
+				// marker and ID maps are being thrown away. lastSyncTime is
+				// what the next pass derives its sync period start from, so
+				// reset that (assigning the local syncPeriodStart, as the
+				// previous code did, had no effect on later passes).
+				lastSyncTime = 0;	// go back to beginning of time
+				clientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;	// no store marker, so download everything
+				DeleteAllLocations();
+				DeleteAllIDMaps();
+
+				// Handle restart?
+				if(StopRun())
+				{
+					::syslog(LOG_INFO, "Exception (%d/%d) due to signal", errorCode, errorSubCode);
+					return;
+				}
+
+				// If the Berkely db files get corrupted, delete them and try again immediately
+				if(isBerkelyDbFailure)
+				{
+					::syslog(LOG_ERR, "Berkely db inode map files corrupted, deleting and restarting scan. Renamed files and directories will not be tracked until after this scan.\n");
+					::sleep(1);
+				}
+				else
+				{
+					// Not restart/terminate, pause and retry
+					SetState(State_Error);
+					::syslog(LOG_ERR, "Exception caught (%d/%d), reset state and waiting to retry...", errorCode, errorSubCode);
+					::sleep(100);
+				}
+			}
+
+			// Log the stats
+			// (arguments reordered to match the labels in the format string --
+			// mTotalFileStreamSize is the raw size of the files uploaded,
+			// mBytesInEncodedFiles is the encoded size sent over the wire)
+			::syslog(LOG_INFO, "File statistics: total file size uploaded %lld, bytes already on server %lld, encoded size %lld",
+				BackupStoreFile::msStats.mTotalFileStreamSize, BackupStoreFile::msStats.mBytesAlreadyOnServer,
+				BackupStoreFile::msStats.mBytesInEncodedFiles);
+			BackupStoreFile::ResetStats();
+			
+			// Tell anything connected to the command socket
+			SendSyncStartOrFinish(false /* finish */);
+
+			// Touch a file to record times in filesystem
+			TouchFileInWorkingDir("last_sync_finish");
+		}
+		
+		// Set state
+		SetState(storageLimitExceeded?State_StorageLimitExceeded:State_Idle);
+		
+	} while(!StopRun());
+	
+	// Make sure we have a clean start next time round (if restart)
+	DeleteAllLocations();
+	DeleteAllIDMaps();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::UseScriptToSeeIfSyncAllowed()
+// Purpose: Private. Use a script to see if the sync should be allowed (if configured)
+// Returns -1 if it's allowed, time in seconds to wait otherwise.
+// Created: 21/6/04
+//
+// --------------------------------------------------------------------------
+int BackupDaemon::UseScriptToSeeIfSyncAllowed()
+{
+	const Configuration &conf(GetConfiguration());
+
+	// Got a script to run?
+	if(!conf.KeyExists("SyncAllowScript"))
+	{
+		// No. Do sync.
+		return -1;
+	}
+
+	// If there's no result, try again in five minutes
+	int waitInSeconds = (60*5);
+
+	// Run it?
+	// pid is set by LocalProcessStream, and is needed in the catch block
+	// below to reap the child even if reading its output fails.
+	pid_t pid = 0;
+	try
+	{
+		std::auto_ptr<IOStream> pscript(LocalProcessStream(conf.GetKeyValue("SyncAllowScript").c_str(), pid));
+
+		// Read in the result
+		// (only the first line of the script's output is considered)
+		IOStreamGetLine getLine(*pscript);
+		std::string line;
+		if(getLine.GetLine(line, true, 30000)) // 30 seconds should be enough
+		{
+			// Got a string, intepret
+			if(line == "now")
+			{
+				// Script says do it now. Obey.
+				waitInSeconds = -1;
+			}
+			else
+			{
+				// How many seconds to wait?
+				// (throws if the line isn't a valid integer, handled below)
+				waitInSeconds = BoxConvert::Convert<int32_t, const std::string&>(line);
+				::syslog(LOG_INFO, "Delaying sync by %d seconds (SyncAllowScript '%s')", waitInSeconds, conf.GetKeyValue("SyncAllowScript").c_str());
+			}
+		}
+		
+		// Wait and then cleanup child process
+		int status = 0;
+		::waitpid(pid, &status, 0);
+	}
+	catch(...)
+	{
+		// Ignore any exceptions
+		// Log that something bad happened
+		::syslog(LOG_ERR, "Error running SyncAllowScript '%s'", conf.GetKeyValue("SyncAllowScript").c_str());
+		// Clean up though
+		// (reap the child, but only if it was actually started)
+		if(pid != 0)
+		{
+			int status = 0;
+			::waitpid(pid, &status, 0);
+		}
+	}
+
+	return waitInSeconds;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::WaitOnCommandSocket(box_time_t, bool &, bool &)
+//		Purpose: Waits on the command socket for a time of UP TO the required time
+// but may be much less, and handles a command if necessary.
+// Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFlagOut, bool &SyncIsForcedOut)
+{
+	ASSERT(mpCommandSocketInfo != 0);
+	if(mpCommandSocketInfo == 0) {::sleep(1); return;} // failure case isn't too bad
+	
+	TRACE1("Wait on command socket, delay = %lld\n", RequiredDelay);
+	
+	try
+	{
+		// Timeout value for connections and things
+		int timeout = ((int)BoxTimeToMilliSeconds(RequiredDelay)) + 1;
+		// Handle bad boundary cases
+		if(timeout <= 0) timeout = 1;
+		if(timeout == INFTIM) timeout = 100000;
+
+		// Wait for socket connection, or handle a command?
+		if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+		{
+			// No connection, listen for a new one
+			// (Accept returns a null pointer on timeout rather than throwing)
+			mpCommandSocketInfo->mpConnectedSocket.reset(mpCommandSocketInfo->mListeningSocket.Accept(timeout).release());
+			
+			if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+			{
+				// If a connection didn't arrive, there was a timeout, which means we've
+				// waited long enough and it's time to go.
+				return;
+			}
+			else
+			{
+#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
+				bool uidOK = true;
+				::syslog(LOG_ERR, "On this platform, no security check can be made on the credientials of peers connecting to the command socket. (bbackupctl)");
+#else
+				// Security check -- does the process connecting to this socket have
+				// the same UID as this process?
+				bool uidOK = false;
+				// BLOCK
+				{
+					uid_t remoteEUID = 0xffff;
+					gid_t remoteEGID = 0xffff;
+					if(mpCommandSocketInfo->mpConnectedSocket->GetPeerCredentials(remoteEUID, remoteEGID))
+					{
+						// Credentials are available -- check UID
+						if(remoteEUID == ::getuid())
+						{
+							// Acceptable
+							uidOK = true;
+						}
+					}
+				}
+#endif
+				
+				// Is this an acceptible connection?
+				if(!uidOK)
+				{
+					// Dump the connection
+					::syslog(LOG_ERR, "Incoming command connection from peer had different user ID than this process, or security check could not be completed.");
+					mpCommandSocketInfo->mpConnectedSocket.reset();
+					return;
+				}
+				else
+				{
+					// Log
+					::syslog(LOG_INFO, "Connection from command socket");
+					
+					// Send a header line summarising the configuration and current state
+					const Configuration &conf(GetConfiguration());
+					char summary[256];
+					int summarySize = sprintf(summary, "bbackupd: %d %d %d %d\nstate %d\n",
+						conf.GetKeyValueBool("AutomaticBackup"),
+						conf.GetKeyValueInt("UpdateStoreInterval"),
+						conf.GetKeyValueInt("MinimumFileAge"),
+						conf.GetKeyValueInt("MaxUploadWait"),
+						mState);
+					mpCommandSocketInfo->mpConnectedSocket->Write(summary, summarySize);
+					
+					// Set the timeout to something very small, so we don't wait too long on waiting
+					// for any incoming data
+					timeout = 10; // milliseconds
+				}
+			}
+		}
+
+		// So there must be a connection now.
+		ASSERT(mpCommandSocketInfo->mpConnectedSocket.get() != 0);
+		
+		// Is there a getline object ready?
+		// (created lazily on the first wait after a connection arrives)
+		if(mpCommandSocketInfo->mpGetLine == 0)
+		{
+			// Create a new one
+			mpCommandSocketInfo->mpGetLine = new IOStreamGetLine(*(mpCommandSocketInfo->mpConnectedSocket.get()));
+		}
+		
+		// Ping the remote side, to provide errors which will mean the socket gets closed
+		mpCommandSocketInfo->mpConnectedSocket->Write("ping\n", 5);
+		
+		// Wait for a command or something on the socket
+		// (mpGetLine is re-checked each iteration because handling "quit"
+		// below deletes it via CloseCommandConnection)
+		std::string command;
+		while(mpCommandSocketInfo->mpGetLine != 0 && !mpCommandSocketInfo->mpGetLine->IsEOF()
+			&& mpCommandSocketInfo->mpGetLine->GetLine(command, false /* no preprocessing */, timeout))
+		{
+			TRACE1("Receiving command '%s' over command socket\n", command.c_str());
+			
+			bool sendOK = false;
+			bool sendResponse = true;
+		
+			// Command to process!
+			if(command == "quit" || command == "")
+			{
+				// Close the socket.
+				CloseCommandConnection();
+				sendResponse = false;
+			}
+			else if(command == "sync")
+			{
+				// Sync now!
+				DoSyncFlagOut = true;
+				SyncIsForcedOut = false;
+				sendOK = true;
+			}
+			else if(command == "force-sync")
+			{
+				// Sync now (forced -- overrides any SyncAllowScript)
+				DoSyncFlagOut = true;
+				SyncIsForcedOut = true;
+				sendOK = true;
+			}
+			else if(command == "reload")
+			{
+				// Reload the configuration
+				SetReloadConfigWanted();
+				sendOK = true;
+			}
+			else if(command == "terminate")
+			{
+				// Terminate the daemon cleanly
+				SetTerminateWanted();
+				sendOK = true;
+			}
+			
+			// Send a response back?
+			// ("ok\n" is 3 bytes, "error\n" is 6)
+			if(sendResponse)
+			{
+				mpCommandSocketInfo->mpConnectedSocket->Write(sendOK?"ok\n":"error\n", sendOK?3:6);
+			}
+			
+			// Set timeout to something very small, so this just checks for data which is waiting
+			timeout = 1;
+		}
+		
+		// Close on EOF?
+		if(mpCommandSocketInfo->mpGetLine != 0 && mpCommandSocketInfo->mpGetLine->IsEOF())
+		{
+			CloseCommandConnection();
+		}
+	}
+	catch(...)
+	{
+		// If an error occurs, and there is a connection active, just close that
+		// connection and continue. Otherwise, let the error propagate.
+		if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+		{
+			throw;
+		}
+		else
+		{
+			// Close socket and ignore error
+			CloseCommandConnection();
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::CloseCommandConnection()
+// Purpose: Close the command connection, ignoring any errors
+// Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::CloseCommandConnection()
+{
+	try
+	{
+		TRACE0("Closing command connection\n");
+		
+		// Free the line reader first, as it refers to the socket below.
+		IOStreamGetLine *pLine = mpCommandSocketInfo->mpGetLine;
+		if(pLine != 0)
+		{
+			mpCommandSocketInfo->mpGetLine = 0;
+			delete pLine;
+		}
+		
+		// Then drop the connected socket itself.
+		mpCommandSocketInfo->mpConnectedSocket.reset();
+	}
+	catch(...)
+	{
+		// Never let a failure during close propagate.
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::SendSyncStartOrFinish(bool)
+//		Purpose: Send a start or finish sync message to the command socket, if it's connected.
+//		Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SendSyncStartOrFinish(bool SendStart)
+{
+	// The bbackupctl program can't rely on a state change, because it may never
+	// change if the server doesn't need to be contacted.
+
+	// Nothing to do unless a client is connected to the command socket.
+	if(mpCommandSocketInfo == 0 || mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+	{
+		return;
+	}
+
+	try
+	{
+		// "start-sync\n" is 11 bytes, "finish-sync\n" is 12.
+		const char *message = SendStart?"start-sync\n":"finish-sync\n";
+		mpCommandSocketInfo->mpConnectedSocket->Write(message, SendStart?11:12);
+	}
+	catch(...)
+	{
+		// Writing failed -- drop the connection and carry on.
+		CloseCommandConnection();
+	}
+}
+
+
+
+
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+	// Comparison functor used to order mount point strings when mount
+	// points are handled by code, rather than the OS. Longer paths sort
+	// first so the deepest matching mount point is found before any of
+	// its parents; equal lengths fall back to natural string order.
+	struct mntLenCompare
+	{
+		bool operator()(const std::string &s1, const std::string &s2) const
+		{
+			if(s1.size() != s2.size())
+			{
+				// Make sure longer strings go first
+				return s1.size() > s2.size();
+			}
+			// Equal size, sort according to natural sort order
+			return s1 < s2;
+		}
+	};
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::SetupLocations(BackupClientContext &, const Configuration &)
+// Purpose: Makes sure that the list of directories records is correctly set up
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Configuration &rLocationsConf)
+{
+	if(!mLocations.empty())
+	{
+		// Looks correctly set up
+		return;
+	}
+
+	// Make sure that if a directory is reinstated, then it doesn't get deleted
+	mDeleteUnusedRootDirEntriesAfter = 0;
+	mUnusedRootDirEntries.clear();
+
+	// Just a check to make sure it's right.
+	DeleteAllLocations();
+	
+	// Going to need a copy of the root directory. Get a connection, and fetch it.
+	BackupProtocolClient &connection(rClientContext.GetConnection());
+	
+	// Ask server for a list of everything in the root directory, which is a directory itself
+	std::auto_ptr<BackupProtocolClientSuccess> dirreply(connection.QueryListDirectory(
+			BackupProtocolClientListDirectory::RootDirectory,
+			BackupProtocolClientListDirectory::Flags_Dir,	// only directories
+			BackupProtocolClientListDirectory::Flags_Deleted | BackupProtocolClientListDirectory::Flags_OldVersion, // exclude old/deleted stuff
+			false /* no attributes */));
+
+	// Retrieve the directory from the stream following
+	BackupStoreDirectory dir;
+	std::auto_ptr<IOStream> dirstream(connection.ReceiveStream());
+	dir.ReadFromStream(*dirstream, connection.GetTimeout());
+	
+	// Map of mount names to ID map index
+	// (locations sharing a mount point share one inode-to-ID map)
+	std::map<std::string, int> mounts;
+	int numIDMaps = 0;
+
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+	// Linux can't tell you where a directory is mounted. So we have to
+	// read the mount entries from /etc/mtab! Bizarre that the OS itself
+	// can't tell you, but there you go.
+	std::set<std::string, mntLenCompare> mountPoints;
+	// BLOCK
+	FILE *mountPointsFile = 0;
+	try
+	{
+		// Open mounts file
+		mountPointsFile = ::setmntent("/etc/mtab", "r");
+		if(mountPointsFile == 0)
+		{
+			THROW_EXCEPTION(CommonException, OSFileError);
+		}
+		
+		// Read all the entries, and put them in the set
+		struct mntent *entry = 0;
+		while((entry = ::getmntent(mountPointsFile)) != 0)
+		{
+			TRACE1("Found mount point at %s\n", entry->mnt_dir);
+			mountPoints.insert(std::string(entry->mnt_dir));
+		}
+		
+		// Close mounts file
+		::endmntent(mountPointsFile);
+	}
+	catch(...)
+	{
+		// Make sure the mtab handle doesn't leak on error
+		if(mountPointsFile != 0)
+		{
+			::endmntent(mountPointsFile);
+		}
+		throw;
+	}
+	// Check sorting and that things are as we expect
+	// (mntLenCompare sorts longest-first, so "/" must be last)
+	ASSERT(mountPoints.size() > 0);
+#ifndef NDEBUG
+	{
+		std::set<std::string, mntLenCompare>::const_reverse_iterator i(mountPoints.rbegin());
+		ASSERT(*i == "/");
+	}
+#endif // n NDEBUG
+#endif // PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+
+	// Then... go through each of the entries in the configuration,
+	// making sure there's a directory created for it.
+	for(std::list<std::pair<std::string, Configuration> >::const_iterator i = rLocationsConf.mSubConfigurations.begin();
+		i != rLocationsConf.mSubConfigurations.end(); ++i)
+	{
+TRACE0("new location\n");
+		// Create a record for it
+		// (raw pointer owned here until pushed onto mLocations below)
+		Location *ploc = new Location;
+		try
+		{
+			// Setup names in the location record
+			ploc->mName = i->first;
+			ploc->mPath = i->second.GetKeyValue("Path");
+			
+			// Read the exclude lists from the Configuration
+			ploc->mpExcludeFiles = BackupClientMakeExcludeList_Files(i->second);
+			ploc->mpExcludeDirs = BackupClientMakeExcludeList_Dirs(i->second);
+			
+			// Do a fsstat on the pathname to find out which mount it's on
+			{
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+				// Warn in logs if the directory isn't absolute
+				if(ploc->mPath[0] != '/')
+				{
+					::syslog(LOG_ERR, "Location path '%s' isn't absolute", ploc->mPath.c_str());
+				}
+				// Go through the mount points found, and find a suitable one
+				std::string mountName("/");
+				{
+					// NOTE: this inner 'i' deliberately shadows the
+					// configuration iterator above within this block
+					std::set<std::string, mntLenCompare>::const_iterator i(mountPoints.begin());
+					TRACE1("%d potential mount points\n", mountPoints.size());
+					for(; i != mountPoints.end(); ++i)
+					{
+						// Compare first n characters with the filename
+						// If it matches, the file belongs in that mount point
+						// (sorting order ensures this)
+						TRACE1("checking against mount point %s\n", i->c_str());
+						if(::strncmp(i->c_str(), ploc->mPath.c_str(), i->size()) == 0)
+						{
+							// Match
+							mountName = *i;
+							break;
+						}
+					}
+					TRACE2("mount point chosen for %s is %s\n", ploc->mPath.c_str(), mountName.c_str());
+				}
+#else
+				// BSD style statfs -- includes mount point, which is nice.
+				struct statfs s;
+				if(::statfs(ploc->mPath.c_str(), &s) != 0)
+				{
+					THROW_EXCEPTION(CommonException, OSFileError)
+				}
+				
+				// Where the filesystem is mounted
+				std::string mountName(s.f_mntonname);
+#endif
+				
+				// Got it?
+				// (reuse the ID map index if this mount has been seen before)
+				std::map<std::string, int>::iterator f(mounts.find(mountName));
+				if(f != mounts.end())
+				{
+					// Yes -- store the index
+					ploc->mIDMapIndex = f->second;
+				}
+				else
+				{
+					// No -- new index
+					ploc->mIDMapIndex = numIDMaps;
+					mounts[mountName] = numIDMaps;
+					
+					// Store the mount name
+					mIDMapMounts.push_back(mountName);
+					
+					// Increment number of maps
+					++numIDMaps;
+				}
+			}
+		
+			// Does this exist on the server?
+			BackupStoreDirectory::Iterator iter(dir);
+			BackupStoreFilenameClear dirname(ploc->mName);	// generate the filename
+			BackupStoreDirectory::Entry *en = iter.FindMatchingClearName(dirname);
+			int64_t oid = 0;
+			if(en != 0)
+			{
+				oid = en->GetObjectID();
+				
+				// Delete the entry from the directory, so we get a list of
+				// unused root directories at the end of this.
+				dir.DeleteEntry(oid);
+			}
+			else
+			{
+				// Doesn't exist, so it has to be created on the server. Let's go!
+				// First, get the directory's attributes and modification time
+				box_time_t attrModTime = 0;
+				BackupClientFileAttributes attr;
+				attr.ReadAttributes(ploc->mPath.c_str(), true /* directories have zero mod times */,
+					0 /* not interested in mod time */, &attrModTime /* get the attribute modification time */);
+				
+				// Execute create directory command
+				MemBlockStream attrStream(attr);
+				std::auto_ptr<BackupProtocolClientSuccess> dirCreate(connection.QueryCreateDirectory(
+					BackupProtocolClientListDirectory::RootDirectory,
+					attrModTime, dirname, attrStream));
+					
+				// Object ID for later creation
+				oid = dirCreate->GetObjectID();
+			}
+
+			// Create and store the directory object for the root of this location
+			ASSERT(oid != 0);
+			BackupClientDirectoryRecord *precord = new BackupClientDirectoryRecord(oid, i->first);
+			ploc->mpDirectoryRecord.reset(precord);
+			
+			// Push it back on the vector of locations
+			mLocations.push_back(ploc);
+		}
+		catch(...)
+		{
+			delete ploc;
+			ploc = 0;
+			throw;
+		}
+	}
+	
+	// Any entries in the root directory which need deleting?
+	// (everything matched above was removed from 'dir', so what's left is unused)
+	if(dir.GetNumberOfEntries() > 0)
+	{
+		::syslog(LOG_INFO, "%d redundant locations in root directory found, will delete from store after %d seconds.",
+			dir.GetNumberOfEntries(), BACKUP_DELETE_UNUSED_ROOT_ENTRIES_AFTER);
+
+		// Store directories in list of things to delete
+		mUnusedRootDirEntries.clear();
+		BackupStoreDirectory::Iterator iter(dir);
+		BackupStoreDirectory::Entry *en = 0;
+		while((en = iter.Next()) != 0)
+		{
+			// Add name to list
+			BackupStoreFilenameClear clear(en->GetName());
+			const std::string &name(clear.GetClearFilename());
+			mUnusedRootDirEntries.push_back(std::pair<int64_t,std::string>(en->GetObjectID(), name));
+			// Log this
+			::syslog(LOG_INFO, "Unused location in root: %s", name.c_str());
+		}
+		ASSERT(mUnusedRootDirEntries.size() > 0);
+		// Time to delete them
+		mDeleteUnusedRootDirEntriesAfter =
+			GetCurrentBoxTime() + SecondsToBoxTime((uint32_t)BACKUP_DELETE_UNUSED_ROOT_ENTRIES_AFTER);
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::SetupIDMapsForSync()
+// Purpose: Sets up ID maps for the sync process -- make sure they're all there
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetupIDMapsForSync()
+{
+	// Need to do different things depending on whether it's an in memory implementation,
+	// or whether it's all stored on disc.
+	
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+
+	// Make sure we have some blank, empty ID maps
+	DeleteIDMapVector(mNewIDMaps);
+	FillIDMapVector(mNewIDMaps, true /* new maps */);
+
+	// Then make sure that the current maps have objects, even if they are empty
+	// (for the very first run)
+	// In-memory current maps are kept between syncs, so only fill on first use.
+	if(mCurrentIDMaps.empty())
+	{
+		FillIDMapVector(mCurrentIDMaps, false /* current maps */);
+	}
+
+#else
+
+	// Make sure we have some blank, empty ID maps
+	DeleteIDMapVector(mNewIDMaps);
+	FillIDMapVector(mNewIDMaps, true /* new maps */);
+	// On-disc maps: reopen the current maps from disc every sync.
+	DeleteIDMapVector(mCurrentIDMaps);
+	FillIDMapVector(mCurrentIDMaps, false /* current maps */);
+
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::FillIDMapVector(std::vector<BackupClientInodeToIDMap *> &)
+// Purpose: Fills the vector with the right number of empty ID maps
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::FillIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector, bool NewMaps)
+{
+	// One map per mount point; the vector must start out empty.
+	ASSERT(rVector.size() == 0);
+	rVector.reserve(mIDMapMounts.size());
+
+	for(unsigned int mount = 0; mount < mIDMapMounts.size(); ++mount)
+	{
+		// Owned here until safely stored on the vector.
+		BackupClientInodeToIDMap *pmap = new BackupClientInodeToIDMap();
+		try
+		{
+			// Base filename for this mount's map; new (in-progress)
+			// maps are distinguished by a ".n" suffix.
+			std::string filename;
+			MakeMapBaseName(mount, filename);
+			if(NewMaps)
+			{
+				filename += ".n";
+			}
+
+			if(!NewMaps && !FileExists(filename.c_str()))
+			{
+				// A current map which doesn't exist on disc yet
+				// (the very first run) -- use an empty map.
+				pmap->OpenEmpty();
+			}
+			else
+			{
+				// Current maps are read only; new maps are created afresh.
+				pmap->Open(filename.c_str(), !NewMaps /* read only */, NewMaps /* create new */);
+			}
+
+			rVector.push_back(pmap);
+		}
+		catch(...)
+		{
+			// Not yet owned by the vector, so free it before rethrowing.
+			delete pmap;
+			throw;
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::DeleteCorruptBerkelyDbFiles()
+// Purpose: Delete the Berkely db files from disc after they have been corrupted.
+// Created: 14/9/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteCorruptBerkelyDbFiles()
+{
+	// Remove both the current map and the in-progress (".n") map for
+	// every mount point, so fresh ones are created on the next scan.
+	for(unsigned int mount = 0; mount < mIDMapMounts.size(); ++mount)
+	{
+		std::string base;
+		MakeMapBaseName(mount, base);
+
+		// The current map...
+		TRACE1("Deleting %s\n", base.c_str());
+		::unlink(base.c_str());
+
+		// ...and the new map alongside it.
+		std::string newmap(base + ".n");
+		TRACE1("Deleting %s\n", newmap.c_str());
+		::unlink(newmap.c_str());
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: MakeMapBaseName(unsigned int, std::string &)
+// Purpose: Makes the base name for a inode map
+// Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::MakeMapBaseName(unsigned int MountNumber, std::string &rNameOut) const
+{
+	// Inode maps live in the configured data directory.
+	const Configuration &config(GetConfiguration());
+	std::string dataDir(config.GetKeyValue("DataDirectory"));
+
+	// Flatten the mount point path into a single leafname by replacing
+	// directory separators with underscores.
+	std::string leaf(mIDMapMounts[MountNumber]);
+	for(std::string::size_type pos = 0; pos < leaf.size(); ++pos)
+	{
+		if(leaf[pos] == DIRECTORY_SEPARATOR_ASCHAR)
+		{
+			leaf[pos] = '_';
+		}
+	}
+
+	// e.g. <DataDirectory>/mnt_home for a location on /home
+	rNameOut = dataDir + DIRECTORY_SEPARATOR "mnt" + leaf;
+}
+
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::CommitIDMapsAfterSync()
+// Purpose: Commits the new ID maps, so the 'new' maps are now the 'current' maps.
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::CommitIDMapsAfterSync()
+{
+	// Need to do different things depending on whether it's an in memory implementation,
+	// or whether it's all stored on disc.
+	
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	// Remove the current ID maps
+	DeleteIDMapVector(mCurrentIDMaps);
+
+	// Copy the (pointers to) "new" maps over to be the new "current" maps
+	mCurrentIDMaps = mNewIDMaps;
+
+	// Clear the new ID maps vector (not delete them!)
+	// (ownership of the map objects has passed to mCurrentIDMaps)
+	mNewIDMaps.clear();
+
+#else
+
+	// Get rid of the maps in memory (leaving them on disc of course)
+	DeleteIDMapVector(mCurrentIDMaps);
+	DeleteIDMapVector(mNewIDMaps);
+
+	// Then move the old maps into the new places
+	// (rename "<base>.n" over "<base>" for each mount point)
+	for(unsigned int l = 0; l < mIDMapMounts.size(); ++l)
+	{
+		std::string target;
+		MakeMapBaseName(l, target);
+		std::string newmap(target + ".n");
+		
+		// Try to rename
+		if(::rename(newmap.c_str(), target.c_str()) != 0)
+		{
+			THROW_EXCEPTION(CommonException, OSFileError)
+		}
+	}
+	
+#endif
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::DeleteIDMapVector(std::vector<BackupClientInodeToIDMap *> &)
+// Purpose: Deletes the contents of a vector of ID maps
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector)
+{
+	// Close and free every map, emptying the vector as we go.
+	// The vector is left empty on return.
+	for(std::vector<BackupClientInodeToIDMap *>::size_type n = rVector.size(); n > 0; --n)
+	{
+		// Take the last entry off the vector
+		BackupClientInodeToIDMap *pMap = rVector.back();
+		rVector.pop_back();
+
+		// Close the underlying storage, then free the object
+		pMap->Close();
+		delete pMap;
+	}
+	ASSERT(rVector.size() == 0);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::FindLocationPathName(const std::string &, std::string &) const
+// Purpose: Tries to find the path of the root of a backup location. Returns true (and path in rPathOut)
+// if it can be found, false otherwise.
+// Created: 12/11/03
+//
+// --------------------------------------------------------------------------
+bool BackupDaemon::FindLocationPathName(const std::string &rLocationName, std::string &rPathOut) const
+{
+	// Scan the configured locations for one whose name matches
+	std::vector<Location *>::const_iterator i(mLocations.begin());
+	for(; i != mLocations.end(); ++i)
+	{
+		const Location *pLoc = *i;
+		if(pLoc->mName == rLocationName)
+		{
+			// Found it -- hand the local filesystem path back to the caller
+			rPathOut = pLoc->mPath;
+			return true;
+		}
+	}
+
+	// No location of that name is configured
+	return false;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::SetState(int)
+// Purpose: Record current action of daemon, and update process title to reflect this
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetState(int State)
+{
+	// Record the daemon's current activity, update the process title, and tell
+	// anything attached to the command socket about the new state.
+
+	// Two little checks
+	if(State == mState) return;
+	if(State < 0) return;
+
+	// Update
+	mState = State;
+
+	// Set process title
+	const static char *stateText[] = {"idle", "connected", "error -- waiting for retry", "over limit on server -- not backing up"};
+	// Guard against a state value with no textual equivalent -- indexing past
+	// the end of stateText would be undefined behaviour. (The enum in
+	// BackupDaemon.h currently tops out at State_StorageLimitExceeded == 3.)
+	if(State >= (int)(sizeof(stateText) / sizeof(stateText[0]))) return;
+	SetProcessTitle(stateText[State]);
+
+	// If there's a command socket connected, then inform it -- disconnecting from the
+	// command socket if there's an error
+	if(mpCommandSocketInfo != 0 && mpCommandSocketInfo->mpConnectedSocket.get() != 0)
+	{
+		// Something connected to the command socket, tell it about the new state
+		char newState[64];
+		// Bounded write, and keep the full int result -- the original assigned
+		// sprintf's return value to a char, a needless narrowing conversion
+		int newStateSize = ::snprintf(newState, sizeof(newState), "state %d\n", State);
+		try
+		{
+			mpCommandSocketInfo->mpConnectedSocket->Write(newState, newStateSize);
+		}
+		catch(...)
+		{
+			// Client went away (or other write failure) -- drop the connection
+			CloseCommandConnection();
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::TouchFileInWorkingDir(const char *)
+// Purpose: Make sure a zero length file of the name exists in the working directory.
+// Use for marking times of events in the filesystem.
+// Created: 21/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::TouchFileInWorkingDir(const char *Filename)
+{
+	// Build the full pathname: <DataDirectory>/<Filename>
+	const Configuration &config(GetConfiguration());
+	std::string fn(config.GetKeyValue("DataDirectory"));
+	fn += DIRECTORY_SEPARATOR_ASCHAR;
+	fn += Filename;
+
+	// Creating (or truncating) the file updates its timestamp; the stream is
+	// closed again as soon as the object goes out of scope.
+	FileStream touch(fn.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::NotifySysadmin(int)
+// Purpose: Run the script to tell the sysadmin about events which need attention.
+// Created: 25/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::NotifySysadmin(int Event)
+{
+	// Textual names for each NotifyEvent_* value. Must be kept in step with
+	// the enum in BackupDaemon.h; null terminated.
+	static const char *sEventNames[] = {"store-full", "read-error", 0};
+
+	TRACE1("BackupDaemon::NotifySysadmin() called, event = %d\n", Event);
+
+	// Range check the event code before it is used as an array index
+	if(Event < 0 || Event > NotifyEvent__MAX)
+	{
+		THROW_EXCEPTION(BackupStoreException, BadNotifySysadminEventCode);
+	}
+
+	// Don't send lots of repeated messages
+	if(mNotificationsSent[Event])
+	{
+		return;
+	}
+
+	// Is there a notification script?
+	const Configuration &conf(GetConfiguration());
+	if(!conf.KeyExists("NotifyScript"))
+	{
+		// Log, and then return
+		::syslog(LOG_ERR, "Not notifying administrator about event %s -- set NotifyScript to do this in future", sEventNames[Event]);
+		return;
+	}
+
+	// Script to run -- the event name is passed as the script's first argument
+	std::string script(conf.GetKeyValue("NotifyScript") + ' ' + sEventNames[Event]);
+
+	// Log what we're about to do
+	::syslog(LOG_INFO, "About to notify administrator about event %s, running script '%s'", sEventNames[Event], script.c_str());
+
+	// Then do it
+	if(::system(script.c_str()) != 0)
+	{
+		::syslog(LOG_ERR, "Notify script returned an error code. ('%s')", script.c_str());
+	}
+
+	// Flag that this is done so the administrator isn't constantly bombarded with lots of errors.
+	// (Set even if the script failed -- deliberate, to avoid a broken script being run repeatedly.)
+	mNotificationsSent[Event] = true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::DeleteUnusedRootDirEntries(BackupClientContext &)
+// Purpose: Deletes any unused entries in the root directory, if they're scheduled to be deleted.
+// Created: 13/5/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteUnusedRootDirEntries(BackupClientContext &rContext)
+{
+	// Delete root directory entries recorded as unused, once their grace
+	// period (mDeleteUnusedRootDirEntriesAfter) has passed.
+	if(mUnusedRootDirEntries.empty() || mDeleteUnusedRootDirEntriesAfter == 0)
+	{
+		// Nothing to do.
+		return;
+	}
+
+	// Check time
+	if(GetCurrentBoxTime() < mDeleteUnusedRootDirEntriesAfter)
+	{
+		// Too early to delete files
+		return;
+	}
+
+	// Entries to delete, and it's the right time to do so...
+	::syslog(LOG_INFO, "Deleting unused locations from store root...");
+	BackupProtocolClient &connection(rContext.GetConnection());
+	for(std::vector<std::pair<int64_t,std::string> >::iterator i(mUnusedRootDirEntries.begin()); i != mUnusedRootDirEntries.end(); ++i)
+	{
+		// first = object ID on the store, second = the location's name
+		connection.QueryDeleteDirectory(i->first);
+
+		// Log this
+		// NOTE(review): %08llx assumes int64_t is long long -- on some LP64
+		// platforms it is plain long; consider PRIx64. Confirm against the
+		// project's other syslog calls.
+		::syslog(LOG_INFO, "Deleted %s (ID %08llx) from store root", i->second.c_str(), i->first);
+	}
+
+	// Reset state so the same entries aren't deleted again
+	mDeleteUnusedRootDirEntriesAfter = 0;
+	mUnusedRootDirEntries.clear();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::Location::Location()
+// Purpose: Constructor
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupDaemon::Location::Location()
+	: mIDMapIndex(0),
+	mpExcludeFiles(0),	// owned; freed in ~Location
+	mpExcludeDirs(0)	// owned; freed in ~Location
+{
+	// mName and mPath are presumably filled in by SetupLocations() -- confirm.
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::Location::~Location()
+// Purpose: Destructor
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupDaemon::Location::~Location()
+{
+	// Release the optional exclude lists. Deleting a null pointer is a
+	// defined no-op, so no explicit checks are needed; the pointers are
+	// zeroed afterwards as a matter of hygiene.
+	delete mpExcludeDirs;
+	mpExcludeDirs = 0;
+	delete mpExcludeFiles;
+	mpExcludeFiles = 0;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::CommandSocketInfo::CommandSocketInfo()
+// Purpose: Constructor
+// Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+BackupDaemon::CommandSocketInfo::CommandSocketInfo()
+	: mpGetLine(0)	// allocated lazily; owned, freed in the destructor
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::CommandSocketInfo::~CommandSocketInfo()
+// Purpose: Destructor
+// Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+BackupDaemon::CommandSocketInfo::~CommandSocketInfo()
+{
+	// Free the line reader if one was ever created (delete of a null
+	// pointer is a defined no-op), and zero the pointer afterwards.
+	delete mpGetLine;
+	mpGetLine = 0;
+}
+
diff --git a/bin/bbackupd/BackupDaemon.h b/bin/bbackupd/BackupDaemon.h
new file mode 100755
index 00000000..ffaf5783
--- /dev/null
+++ b/bin/bbackupd/BackupDaemon.h
@@ -0,0 +1,166 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupDaemon.h
+// Purpose: Backup daemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPDAEMON__H
+#define BACKUPDAEMON__H
+
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "Daemon.h"
+#include "BoxTime.h"
+#include "Socket.h"
+#include "SocketListen.h"
+#include "SocketStream.h"
+
+class BackupClientDirectoryRecord;
+class BackupClientContext;
+class Configuration;
+class BackupClientInodeToIDMap;
+class ExcludeList;
+class IOStreamGetLine;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupDaemon
+// Purpose: Backup daemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+class BackupDaemon : public Daemon
+{
+public:
+	BackupDaemon();
+	~BackupDaemon();
+private:
+	// Copying not allowed -- declared but not defined
+	BackupDaemon(const BackupDaemon &);
+public:
+
+	// Daemon framework entry points
+	void Run();
+	virtual const char *DaemonName() const;
+	virtual const char *DaemonBanner() const;
+	const ConfigurationVerify *GetConfigVerify() const;
+
+	// Find the local filesystem path of a named backup location.
+	// Returns true (and the path in rPathOut) if found, false otherwise.
+	bool FindLocationPathName(const std::string &rLocationName, std::string &rPathOut) const;
+
+	enum
+	{
+		// Add stuff to this, make sure the textual equivalents in SetState() are changed too.
+		State_Initialising = -1,
+		State_Idle = 0,
+		State_Connected = 1,
+		State_Error = 2,
+		State_StorageLimitExceeded = 3
+	};
+
+	int GetState() {return mState;}
+
+	// Allow other classes to call this too
+	enum
+	{
+		NotifyEvent_StoreFull = 0,
+		NotifyEvent_ReadError = 1,
+		NotifyEvent__MAX = 1
+		// When adding notifications, remember to add strings to NotifySysadmin()
+	};
+	// Run the configured NotifyScript (if any) to alert the administrator
+	void NotifySysadmin(int Event);
+
+private:
+	void Run2();
+
+	void DeleteAllLocations();
+	void SetupLocations(BackupClientContext &rClientContext, const Configuration &rLocationsConf);
+
+	// Close, delete and remove every map in the vector
+	void DeleteIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector);
+	void DeleteAllIDMaps()
+	{
+		DeleteIDMapVector(mCurrentIDMaps);
+		DeleteIDMapVector(mNewIDMaps);
+	}
+	void FillIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector, bool NewMaps);
+
+	// Prepare the inode-to-ID maps before a sync, and promote the new
+	// maps to current once the sync has completed successfully
+	void SetupIDMapsForSync();
+	void CommitIDMapsAfterSync();
+	void DeleteCorruptBerkelyDbFiles();
+
+	// Compute the on-disc base filename for a mount's inode-to-ID map
+	void MakeMapBaseName(unsigned int MountNumber, std::string &rNameOut) const;
+
+	// Record current activity; updates process title and informs the command socket
+	void SetState(int State);
+
+	void WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFlagOut, bool &SyncIsForcedOut);
+	void CloseCommandConnection();
+	void SendSyncStartOrFinish(bool SendStart);
+
+	// Create/truncate a zero length marker file in the working directory
+	void TouchFileInWorkingDir(const char *Filename);
+
+	// Delete root directory entries scheduled for removal, once due
+	void DeleteUnusedRootDirEntries(BackupClientContext &rContext);
+
+#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
+	// For warning user about potential security hole
+	virtual void SetupInInitialProcess();
+#endif
+
+	int UseScriptToSeeIfSyncAllowed();
+
+private:
+	// One backup location: a named local directory plus its exclude lists
+	class Location
+	{
+	public:
+		Location();
+		~Location();
+	private:
+		Location(const Location &);	// copy not allowed
+		Location &operator=(const Location &);
+	public:
+		std::string mName;
+		std::string mPath;
+		std::auto_ptr<BackupClientDirectoryRecord> mpDirectoryRecord;
+		int mIDMapIndex;
+		ExcludeList *mpExcludeFiles;	// owned; freed in ~Location
+		ExcludeList *mpExcludeDirs;	// owned; freed in ~Location
+	};
+
+	int mState;	// what the daemon is currently doing
+
+	std::vector<Location *> mLocations;
+
+	// Parallel data: mount names, and the inode maps for each mount
+	std::vector<std::string> mIDMapMounts;
+	std::vector<BackupClientInodeToIDMap *> mCurrentIDMaps;
+	std::vector<BackupClientInodeToIDMap *> mNewIDMaps;
+
+	// For the command socket
+	class CommandSocketInfo
+	{
+	public:
+		CommandSocketInfo();
+		~CommandSocketInfo();
+	private:
+		CommandSocketInfo(const CommandSocketInfo &);	// no copying
+		CommandSocketInfo &operator=(const CommandSocketInfo &);
+	public:
+		SocketListen<SocketStream, 1 /* listen backlog */> mListeningSocket;
+		std::auto_ptr<SocketStream> mpConnectedSocket;
+		IOStreamGetLine *mpGetLine;	// owned; freed in destructor
+	};
+
+	// Using a socket?
+	CommandSocketInfo *mpCommandSocketInfo;
+
+	// Stop notifications being repeated.
+	bool mNotificationsSent[NotifyEvent__MAX + 1];
+
+	// Unused entries in the root directory wait a while before being deleted
+	box_time_t mDeleteUnusedRootDirEntriesAfter; // time to delete them
+	std::vector<std::pair<int64_t,std::string> > mUnusedRootDirEntries;
+};
+
+#endif // BACKUPDAEMON__H
+
diff --git a/bin/bbackupd/bbackupd-config b/bin/bbackupd/bbackupd-config
new file mode 100755
index 00000000..c5e52282
--- /dev/null
+++ b/bin/bbackupd/bbackupd-config
@@ -0,0 +1,525 @@
+#!/usr/bin/perl
+use strict;
+
+# should be running as root
+if($> != 0)
+{
+ printf "\nWARNING: this should be run as root\n\n"
+}
+
+sub error_print_usage
+{
+ print <<__E;
+
+Setup bbackupd config utility.
+
+Bad command line parameters.
+Usage:
+ bbackupd-config config-dir backup-mode account-num server-hostname working-dir backup-dir [more backup directories]
+
+config-dir usually /etc/box
+backup-mode is lazy or snapshot
+ lazy mode runs continuously, uploading files over a specified age
+ snapshot mode uploads a snapshot of the filesystem when instructed explicitly
+account-num (hexadecimal) and server-hostname as supplied from the server administrator
+working-dir usually /var/bbackupd
+backup-dir, list of directories to back up
+
+__E
+ print "=========\nERROR:\n",$_[0],"\n\n" if $_[0] ne '';
+ exit(1);
+}
+
+# check and get command line parameters
+if($#ARGV < 4)
+{
+ error_print_usage();
+}
+
+# check for OPENSSL_CONF environment var being set
+if(exists $ENV{'OPENSSL_CONF'})
+{
+ print <<__E;
+
+---------------------------------------
+
+WARNING:
+ You have the OPENSSL_CONF environment variable set.
+ Use of non-standard openssl configs may cause problems.
+
+---------------------------------------
+
+__E
+}
+
+# default locations
+my $default_config_location = '/etc/box/bbackupd.conf';
+
+# command line parameters
+my ($config_dir,$backup_mode,$account_num,$server,$working_dir,@tobackup) = @ARGV;
+
+# check backup mode is valid
+if($backup_mode ne 'lazy' && $backup_mode ne 'snapshot')
+{
+ error_print_usage("ERROR: backup mode must be 'lazy' or 'snapshot'");
+}
+
+# check server exists
+{
+ my @r = gethostbyname($server);
+ if($#r < 0)
+ {
+ error_print_usage("Backup server specified as '$server', but it could not found.\n(A test DNS lookup failed -- check arguments)");
+ }
+}
+
+if($working_dir !~ m~\A/~)
+{
+ error_print_usage("Working directory $working_dir is not specified as an absolute path");
+}
+
+# ssl stuff
+my $private_key = "$config_dir/bbackupd/$account_num-key.pem";
+my $certificate_request = "$config_dir/bbackupd/$account_num-csr.pem";
+my $certificate = "$config_dir/bbackupd/$account_num-cert.pem";
+my $ca_root_cert = "$config_dir/bbackupd/serverCA.pem";
+
+# encryption keys
+my $enc_key_file = "$config_dir/bbackupd/$account_num-FileEncKeys.raw";
+
+# other files
+my $config_file = "$config_dir/bbackupd.conf";
+my $notify_script = "$config_dir/bbackupd/NotifySysadmin.sh";
+
+# check that the directories are allowable
+for(@tobackup)
+{
+ if($_ eq '/')
+ {
+ die "It is not recommended that you backup the root directory of your disc";
+ }
+ if($_ !~ m/\A\//)
+ {
+ die "Directory $_ is not specified as an absolute path";
+ }
+ if(!-d $_)
+ {
+ die "$_ is not a directory";
+ }
+}
+
+# summarise configuration
+
+print <<__E;
+
+Setup bbackupd config utility.
+
+Configuration:
+ Writing configuration file: $config_file
+ Account: $account_num
+ Server hostname: $server
+ Directories to back up:
+__E
+print ' ',$_,"\n" for(@tobackup);
+print <<__E;
+
+Note: If other file systems are mounted inside these directories, then problems may occur
+with files on the store server being renamed incorrectly. This will cause efficiency
+problems, but not affect the integrity of the backups.
+
+WARNING: Directories not checked against mountpoints. Check mounted filesystems manually.
+
+__E
+
+# create directories
+if(!-d $config_dir)
+{
+ printf "Creating $config_dir...\n";
+ mkdir $config_dir,0755 or die "Can't create $config_dir";
+}
+
+if(!-d "$config_dir/bbackupd")
+{
+ printf "Creating $config_dir/bbackupd\n";
+ mkdir "$config_dir/bbackupd",0700 or die "Can't create $config_dir/bbackupd";
+}
+
+if(!-d "$working_dir")
+{
+ printf "Creating $working_dir\n";
+ if(!mkdir($working_dir,0700))
+ {
+ die "Couldn't create $working_dir -- create this manually and try again\n";
+ }
+}
+
+# generate the private key for the server
+if(!-f $private_key)
+{
+ print "Generating private key...\n";
+ if(system("openssl genrsa -out $private_key 2048") != 0)
+ {
+ die "Couldn't generate private key."
+ }
+}
+
+# generate a certificate request
+if(!-f $certificate_request)
+{
+ die "Couldn't run openssl for CSR generation" unless
+ open(CSR,"|openssl req -new -key $private_key -sha1 -out $certificate_request");
+ print CSR <<__E;
+.
+.
+.
+.
+.
+BACKUP-$account_num
+.
+.
+.
+
+__E
+ close CSR;
+ print "\n\n";
+ die "Certificate request wasn't created.\n" unless -f $certificate_request
+}
+
+# generate the key material for the file
+if(!-f $enc_key_file)
+{
+ print "Generating keys for file backup\n";
+ if(system("openssl rand -out $enc_key_file 1024") != 0)
+ {
+ die "Couldn't generate file backup keys."
+ }
+}
+
+# write the notify when store full script
+print "Writing notify script $notify_script\n";
+open NOTIFY,">$notify_script" or die "Can't open for writing";
+
+my $hostname = `hostname`; chomp $hostname;
+my $current_username = `whoami`; chomp $current_username;
+my $sendmail = `whereis sendmail`; chomp $sendmail;
+$sendmail =~ s/\n.\Z//s;
+# for Linux style whereis
+$sendmail = $1 if $sendmail =~ /^sendmail:\s+([\S]+)/;
+# last ditch guess
+$sendmail = 'sendmail' if $sendmail !~ m/\S/;
+
+print NOTIFY <<__EOS;
+#!/bin/sh
+
+SUBJECT="BACKUP PROBLEM on host $hostname"
+SENDTO="$current_username"
+
+if [ \$1 = store-full ]
+then
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (store full)
+To: \$SENDTO
+
+
+The store account for $hostname is full.
+
+=============================
+FILES ARE NOT BEING BACKED UP
+=============================
+
+Please adjust the limits on account $account_num on server $server.
+
+EOM
+elif [ \$1 = read-error ]
+then
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (read errors)
+To: \$SENDTO
+
+
+Errors occurred reading some files or directories for backup on $hostname.
+
+===================================
+THESE FILES ARE NOT BEING BACKED UP
+===================================
+
+Check the logs on $hostname for the files and directories which caused
+these errors, and take appropriate action.
+
+Other files are being backed up.
+
+EOM
+else
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (unknown)
+To: \$SENDTO
+
+
+The backup daemon on $hostname reported an unknown error.
+
+==========================
+FILES MAY NOT BE BACKED UP
+==========================
+
+Please check the logs on $hostname.
+
+EOM
+fi
+__EOS
+
+close NOTIFY;
+chmod 0700,$notify_script or die "Can't chmod $notify_script";
+
+
+# write the configuration file
+print "Writing configuration file $config_file\n";
+open CONFIG,">$config_file" or die "Can't open config file for writing";
+print CONFIG <<__E;
+
+StoreHostname = $server
+AccountNumber = 0x$account_num
+KeysFile = $enc_key_file
+
+CertificateFile = $certificate
+PrivateKeyFile = $private_key
+TrustedCAsFile = $ca_root_cert
+
+DataDirectory = $working_dir
+
+
+# This script is run whenever bbackupd encounters a problem which requires
+# the system administrator to assist:
+# 1) The store is full, and no more data can be uploaded.
+# 2) Some files or directories were not readable.
+# The default script emails the system administrator.
+
+NotifyScript = $notify_script
+
+__E
+
+if($backup_mode eq 'lazy')
+{
+ # lazy mode configuration
+ print CONFIG <<__E;
+
+# A scan of the local discs will be made once an hour (approximately).
+# To avoid cycles of load on the server, this time is randomly adjusted by a small
+# percentage as the daemon runs.
+
+UpdateStoreInterval = 3600
+
+
+# A file must have been modified at least 6 hours ago before it will be uploaded.
+
+MinimumFileAge = 21600
+
+
+# If a file is modified repeated, it won't be uploaded immediately in case it's modified again.
+# However, it should be uploaded eventually. This is how long we should wait after first noticing
+# a change. (1 day)
+
+MaxUploadWait = 86400
+
+__E
+}
+else
+{
+ # snapshot configuration
+ print CONFIG <<__E;
+
+# This configuration file is written for snapshot mode.
+# You will need to run bbackupctl to instruct the daemon to upload files.
+
+AutomaticBackup = no
+UpdateStoreInterval = 0
+MinimumFileAge = 0
+MaxUploadWait = 0
+
+__E
+}
+
+print CONFIG <<__E;
+
+# Files above this size (in bytes) are tracked, and if they are renamed they will simply be
+# renamed on the server, rather than being uploaded again. (64k - 1)
+
+FileTrackingSizeThreshold = 65535
+
+
+# The daemon does "changes only" uploads for files above this size (in bytes).
+# Files less than it are uploaded whole without this extra processing.
+
+DiffingUploadSizeThreshold = 8192
+
+
+# The limit on how much time is spent diffing files. Most files shouldn't take very long,
+# but if you have really big files you can use this to limit the time spent diffing them.
+# * Reduce if you are having problems with processor usage.
+# * Increase if you have large files, and think the upload of changes is too large and want
+# to spend more time searching for unchanged blocks.
+
+MaximumDiffingTime = 20
+
+
+# Uncomment this line to see exactly what the daemon is doing when it's connected to the server.
+
+# ExtendedLogging = yes
+
+
+# Use this to temporarily stop bbackupd from synchronising or connecting to the store.
+# This specifies a program or script which is run just before each sync, and ideally
+# the full path to the interpreter. It will be run as the same user bbackupd is running as,
+# usually root.
+# The script prints either "now" or a number to STDOUT (and a terminating newline, no quotes).
+# If the result was "now", then the sync will happen. If it's a number, then the script will
+# be asked again in that number of seconds.
+# For example, you could use this on a laptop to only backup when on a specific network.
+
+# SyncAllowScript = /path/to/interpreter/or/exe script-name parameters etc
+
+
+# Where the command socket is created in the filesystem.
+
+CommandSocket = /var/run/bbackupd.sock
+
+
+Server
+{
+ PidFile = /var/run/bbackupd.pid
+}
+
+#
+# BackupLocations specifies which locations on disc should be backed up. Each
+# directory is in the format
+#
+# name
+# {
+# Path = /path/of/directory
+# (optional exclude directives)
+# }
+#
+# 'name' is derived from the Path by the config script, but should merely be
+# unique.
+#
+# The exclude directives are of the form
+#
+# [Exclude|AlwaysInclude][File|Dir][|sRegex] = regex or full pathname
+#
+# (The regex suffix is shown as 'sRegex' to make File or Dir plural)
+#
+# For example:
+#
+# ExcludeDir = /home/guest-user
+# ExcludeFilesRegex = *.(mp3|MP3)\$
+# AlwaysIncludeFile = /home/username/veryimportant.mp3
+#
+# This excludes the directory /home/guest-user from the backup along with all mp3
+# files, except one MP3 file in particular.
+#
+# In general, Exclude excludes a file or directory, unless the directory is
+# explicitly mentioned in a AlwaysInclude directive.
+#
+# If a directive ends in Regex, then it is a regular expression rather than a
+# explicit full pathname. See
+#
+# man 7 re_format
+#
+# for the regex syntax on your platform.
+#
+
+BackupLocations
+{
+__E
+
+# write the dirs to backup
+for my $d (@tobackup)
+{
+ $d =~ m/\A.(.+)\Z/;
+ my $n = $1;
+ $n =~ tr`/`-`;
+
+ my $excludekeys = '';
+ if(substr($enc_key_file, 0, length($d)+1) eq $d.'/')
+ {
+ $excludekeys = "\t\tExcludeFile = $enc_key_file\n";
+ print <<__E;
+
+NOTE: Keys file has been explicitly excluded from the backup.
+
+__E
+ }
+
+ print CONFIG <<__E
+ $n
+ {
+ Path = $d
+$excludekeys }
+__E
+}
+
+print CONFIG "}\n\n";
+close CONFIG;
+
+# explain to the user what they need to do next
+my $daemon_args = ($config_file eq $default_config_location)?'':" $config_file";
+my $ctl_daemon_args = ($config_file eq $default_config_location)?'':" -c $config_file";
+
+print <<__E;
+
+===================================================================
+
+bbackupd basic configuration complete.
+
+What you need to do now...
+
+1) Make a backup of $enc_key_file
+ This should be a secure offsite backup.
+ Without it, you cannot restore backups. Everything else can
+ be replaced. But this cannot.
+ KEEP IT IN A SAFE PLACE, OTHERWISE YOUR BACKUPS ARE USELESS.
+
+2) Send $certificate_request
+ to the administrator of the backup server, and ask for it to
+ be signed.
+
+3) The administrator will send you two files. Install them as
+ $certificate
+ $ca_root_cert
+ after checking their authenticity.
+
+4) You may wish to read the configuration file
+ $config_file
+ and adjust as appropriate.
+
+ There are some notes in it on excluding files you do not
+ wish to be backed up.
+
+5) Review the script
+ $notify_script
+ and check that it will email the right person when the store
+ becomes full. This is important -- when the store is full, no
+ more files will be backed up. You want to know about this.
+
+6) Start the backup daemon with the command
+ /usr/local/bin/bbackupd$daemon_args
+ in /etc/rc.local, or your local equivalent.
+ Note that bbackupd must run as root.
+__E
+if($backup_mode eq 'snapshot')
+{
+ print <<__E;
+
+7) Set up a cron job to run whenever you want a snapshot of the
+ file system to be taken. Run the command
+ /usr/local/bin/bbackupctl -q$ctl_daemon_args sync
+__E
+}
+print <<__E;
+
+===================================================================
+
+Remember to make a secure, offsite backup of your backup keys,
+as described in step 1 above. If you do not, you have no backups.
+
+__E
+
diff --git a/bin/bbackupd/bbackupd.cpp b/bin/bbackupd/bbackupd.cpp
new file mode 100755
index 00000000..ca843105
--- /dev/null
+++ b/bin/bbackupd/bbackupd.cpp
@@ -0,0 +1,26 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: bbackupd.cpp
+// Purpose: main file for backup daemon
+// Created: 2003/10/11
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+#include "BackupDaemon.h"
+#include "MainHelper.h"
+#include "BoxPortsAndFiles.h"
+
+#include "MemLeakFindOn.h"
+
+int main(int argc, const char *argv[])
+{
+	MAINHELPER_START
+
+	// Construct the daemon and hand control to it, using the compiled-in
+	// default configuration file location. (Command line handling is
+	// presumably done inside Daemon::Main() -- confirm in lib Daemon.)
+	BackupDaemon daemon;
+	return daemon.Main(BOX_FILE_BBACKUPD_DEFAULT_CONFIG, argc, argv);
+
+	MAINHELPER_END
+}
+
diff --git a/bin/bbackupobjdump/bbackupobjdump.cpp b/bin/bbackupobjdump/bbackupobjdump.cpp
new file mode 100644
index 00000000..74fff510
--- /dev/null
+++ b/bin/bbackupobjdump/bbackupobjdump.cpp
@@ -0,0 +1,82 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: bbackupobjdump.cpp
+// Purpose: Dump contents of backup objects
+// Created: 3/5/04
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <stdio.h>
+
+#include "MainHelper.h"
+#include "FileStream.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreObjectMagic.h"
+
+#include "MemLeakFindOn.h"
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: int main(int, const char *[])
+// Purpose: Main fn for bbackupobjdump
+// Created: 3/5/04
+//
+// --------------------------------------------------------------------------
+int main(int argc, const char *argv[])
+{
+	MAINHELPER_START
+
+	if(argc != 2)
+	{
+		::printf("Input file not specified.\nUsage: bbackupobjdump <input file>\n");
+		return 1;
+	}
+
+	// Open file
+	FileStream file(argv[1]);
+
+	// Read magic number
+	uint32_t signature;
+	if(file.Read(&signature, sizeof(signature)) != sizeof(signature))
+	{
+		// Too short to contain a signature. Report it and return a proper
+		// error code -- the original "return false" evaluated to 0 here,
+		// i.e. it told the shell the dump SUCCEEDED.
+		::printf("File is too short to be a box backup object.\n");
+		return 1;
+	}
+	// Seek back to beginning so the dump routines see the whole object
+	file.Seek(0, IOStream::SeekType_Absolute);
+
+	// Then... check depending on the type (signature is stored big-endian)
+	switch(ntohl(signature))
+	{
+	case OBJECTMAGIC_FILE_MAGIC_VALUE_V1:
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+	case OBJECTMAGIC_FILE_MAGIC_VALUE_V0:
+#endif
+		BackupStoreFile::DumpFile(stdout, false, file);
+		break;
+
+	case OBJECTMAGIC_DIR_MAGIC_VALUE:
+		{
+			BackupStoreDirectory dir;
+			dir.ReadFromStream(file, IOStream::TimeOutInfinite);
+			dir.Dump(stdout, false);
+			// CheckAndFix() returns true if it had to fix anything
+			if(dir.CheckAndFix())
+			{
+				::printf("Directory didn't pass checking\n");
+			}
+		}
+		break;
+
+	default:
+		::printf("File does not appear to be a valid box backup object.\n");
+		break;
+	}
+
+	MAINHELPER_END
+}
+
diff --git a/bin/bbackupquery/BackupQueries.cpp b/bin/bbackupquery/BackupQueries.cpp
new file mode 100755
index 00000000..0d08f1eb
--- /dev/null
+++ b/bin/bbackupquery/BackupQueries.cpp
@@ -0,0 +1,1700 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupQueries.cpp
+// Purpose: Perform various queries on the backup store server.
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+
+#include <set>
+
+#include "BackupQueries.h"
+#include "Utils.h"
+#include "Configuration.h"
+#include "autogen_BackupProtocolClient.h"
+#include "BackupStoreFilenameClear.h"
+#include "BackupStoreDirectory.h"
+#include "IOStream.h"
+#include "BoxTimeToText.h"
+#include "FileStream.h"
+#include "BackupStoreFile.h"
+#include "TemporaryDirectory.h"
+#include "FileModificationTime.h"
+#include "BackupClientFileAttributes.h"
+#include "CommonException.h"
+#include "BackupClientRestore.h"
+#include "BackupStoreException.h"
+#include "ExcludeList.h"
+#include "BackupClientMakeExcludeList.h"
+
+#include "MemLeakFindOn.h"
+
+#define COMPARE_RETURN_SAME 1
+#define COMPARE_RETURN_DIFFERENT 2
+#define COMPARE_RETURN_ERROR 3
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::BackupQueries()
+// Purpose: Constructor
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
// Constructor: keeps references to an open server connection and the
// client configuration for the lifetime of this object.
BackupQueries::BackupQueries(BackupProtocolClient &rConnection, const Configuration &rConfiguration)
	: mrConnection(rConnection),			// connection to the backup store server
	mrConfiguration(rConfiguration),		// bbackupd configuration (used by compare commands)
	mQuitNow(false),						// set by the quit/exit commands
	mRunningAsRoot(false),					// properly initialised in the body below
	mWarnedAboutOwnerAttributes(false),
	mReturnCode(0) // default return code
{
	// Note whether we're running with root privileges (effective UID 0)
	mRunningAsRoot = (::geteuid() == 0);
}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::~BackupQueries()
+// Purpose: Destructor
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
// Destructor: nothing to release -- members are references and
// self-cleaning standard containers.
BackupQueries::~BackupQueries()
{
}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::DoCommand(const char *)
+// Purpose: Perform a command
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
// Parse and execute a single command line typed by the user.
// The line is tokenised into words (double quotes group a word containing
// spaces), words beginning with '-' are collected as single-character
// options, and the first word selects the command to run.
void BackupQueries::DoCommand(const char *Command)
{
	// is the command a shell command? ("sh " prefix with a non-empty rest)
	if(Command[0] == 's' && Command[1] == 'h' && Command[2] == ' ' && Command[3] != '\0')
	{
		// Yes, run shell command
		::system(Command + 3);
		return;
	}

	// split command into components
	std::vector<std::string> cmdElements;
	std::string options;
	{
		const char *c = Command;
		bool inQuoted = false;		// currently inside a "..." word?
		bool inOptions = false;		// currently accumulating -xyz option chars?

		std::string s;				// word being built
		while(*c != 0)
		{
			// Terminating char? (closing quote inside a quoted word,
			// otherwise a space)
			if(*c == ((inQuoted)?'"':' '))
			{
				if(!s.empty()) cmdElements.push_back(s);
				s.resize(0);
				inQuoted = false;
				inOptions = false;
			}
			else
			{
				// No. Start of quoted parameter?
				if(s.empty() && *c == '"')
				{
					inQuoted = true;
				}
				// Start of options? (a '-' at the start of a word)
				else if(s.empty() && *c == '-')
				{
					inOptions = true;
				}
				else
				{
					if(inOptions)
					{
						// Option char -- all options from all words are
						// pooled into one string
						options += *c;
					}
					else
					{
						// Normal string char
						s += *c;
					}
				}
			}

			++c;
		}
		// Don't lose a trailing word that wasn't followed by a space
		if(!s.empty()) cmdElements.push_back(s);
	}

	// Check...
	if(cmdElements.size() < 1)
	{
		// blank command
		return;
	}

	// Data about commands: names and, for each, the set of valid option
	// characters. The two arrays are index-aligned with the COMMAND_*
	// constants below.
	static const char *commandNames[] = {"quit", "exit", "list", "pwd", "cd", "lcd", "sh", "getobject", "get", "compare", "restore", "help", "usage", "undelete", 0};
	static const char *validOptions[] = {"", "", "rodIFtsh", "", "od", "", "", "", "i", "alcqE", "dri", "", "", "", 0};
	#define COMMAND_Quit 0
	#define COMMAND_Exit 1
	#define COMMAND_List 2
	#define COMMAND_pwd 3
	#define COMMAND_cd 4
	#define COMMAND_lcd 5
	#define COMMAND_sh 6
	#define COMMAND_GetObject 7
	#define COMMAND_Get 8
	#define COMMAND_Compare 9
	#define COMMAND_Restore 10
	#define COMMAND_Help 11
	#define COMMAND_Usage 12
	#define COMMAND_Undelete 13
	static const char *alias[] = {"ls", 0};
	static const int aliasIs[] = {COMMAND_List, 0};

	// Work out which command it is...
	int cmd = 0;
	while(commandNames[cmd] != 0 && ::strcmp(cmdElements[0].c_str(), commandNames[cmd]) != 0)
	{
		cmd++;
	}
	if(commandNames[cmd] == 0)
	{
		// Check for aliases
		int a;
		for(a = 0; alias[a] != 0; ++a)
		{
			if(::strcmp(cmdElements[0].c_str(), alias[a]) == 0)
			{
				// Found an alias
				cmd = aliasIs[a];
				break;
			}
		}

		// No such command
		if(alias[a] == 0)
		{
			printf("Unrecognised command: %s\n", Command);
			return;
		}
	}

	// Arguments (everything after the command word, minus option words)
	std::vector<std::string> args(cmdElements.begin() + 1, cmdElements.end());

	// Set up options: a lookup table indexed by option character
	bool opts[256];
	for(int o = 0; o < 256; ++o) opts[o] = false;
	// BLOCK
	{
		// options
		const char *c = options.c_str();
		while(*c != 0)
		{
			// Valid option for this command?
			if(::strchr(validOptions[cmd], *c) == NULL)
			{
				printf("Invalid option '%c' for command %s\n", *c, commandNames[cmd]);
				return;
			}
			// NOTE(review): *c is a plain char; on platforms where char is
			// signed, non-ASCII option characters would yield a negative
			// index here -- assumed ASCII-only input.
			opts[(int)*c] = true;
			++c;
		}
	}

	if(cmd != COMMAND_Quit && cmd != COMMAND_Exit)
	{
		// If not a quit command, set the return code to zero
		SetReturnCode(0);
	}

	// Handle command
	switch(cmd)
	{
	case COMMAND_Quit:
	case COMMAND_Exit:
		mQuitNow = true;
		break;

	case COMMAND_List:
		CommandList(args, opts);
		break;

	case COMMAND_pwd:
		{
			// Simple implementation, so do it here
			std::string dir(GetCurrentDirectoryName());
			printf("%s (%08llx)\n", dir.c_str(), GetCurrentDirectoryID());
		}
		break;

	case COMMAND_cd:
		CommandChangeDir(args, opts);
		break;

	case COMMAND_lcd:
		CommandChangeLocalDir(args);
		break;

	case COMMAND_sh:
		// Reachable only for a bare "sh" with no command text
		printf("The command to run must be specified as an argument.\n");
		break;

	case COMMAND_GetObject:
		CommandGetObject(args, opts);
		break;

	case COMMAND_Get:
		CommandGet(args, opts);
		break;

	case COMMAND_Compare:
		CommandCompare(args, opts);
		break;

	case COMMAND_Restore:
		CommandRestore(args, opts);
		break;

	case COMMAND_Usage:
		CommandUsage();
		break;

	case COMMAND_Help:
		CommandHelp(args);
		break;

	case COMMAND_Undelete:
		CommandUndelete(args, opts);
		break;

	default:
		break;
	}
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandList(const std::vector<std::string> &, const bool *)
+// Purpose: List directories (optionally recursive)
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
// "list" / "ls" command: list a store directory, defaulting to the current
// one. These option macros are also used by List() below.
void BackupQueries::CommandList(const std::vector<std::string> &args, const bool *opts)
{
	#define LIST_OPTION_RECURSIVE 'r'
	#define LIST_OPTION_ALLOWOLD 'o'
	#define LIST_OPTION_ALLOWDELETED 'd'
	#define LIST_OPTION_NOOBJECTID 'I'
	#define LIST_OPTION_NOFLAGS 'F'
	#define LIST_OPTION_TIMES 't'
	#define LIST_OPTION_SIZEINBLOCKS 's'
	#define LIST_OPTION_DISPLAY_HASH 'h'

	// default to using the current directory
	int64_t rootDir = GetCurrentDirectoryID();

	// name of base directory
	std::string listRoot; // blank

	// Got a directory in the arguments?
	if(args.size() > 0)
	{
		// Attempt to find the directory, honouring the -o/-d options
		rootDir = FindDirectoryObjectID(args[0], opts[LIST_OPTION_ALLOWOLD], opts[LIST_OPTION_ALLOWDELETED]);
		if(rootDir == 0)
		{
			printf("Directory %s not found on store\n", args[0].c_str());
			return;
		}
	}

	// List it
	List(rootDir, listRoot, opts, true /* first level to list */);
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandList2(int64_t, const std::string &, const bool *)
+// Purpose: Do the actual listing of directories and files
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
// Fetch one directory from the store and print one line per entry,
// formatted according to the LIST_OPTION_* flags in opts. Recurses into
// sub-directories when the -r option is set; rListRoot accumulates the
// path prefix used for entries below the first level.
void BackupQueries::List(int64_t DirID, const std::string &rListRoot, const bool *opts, bool FirstLevel)
{
	// Generate exclude flags: hide old/deleted versions unless asked for
	int16_t excludeFlags = BackupProtocolClientListDirectory::Flags_EXCLUDE_NOTHING;
	if(!opts[LIST_OPTION_ALLOWOLD]) excludeFlags |= BackupProtocolClientListDirectory::Flags_OldVersion;
	if(!opts[LIST_OPTION_ALLOWDELETED]) excludeFlags |= BackupProtocolClientListDirectory::Flags_Deleted;

	// Do communication
	mrConnection.QueryListDirectory(
			DirID,
			BackupProtocolClientListDirectory::Flags_INCLUDE_EVERYTHING, // both files and directories
			excludeFlags,
			true /* want attributes */);

	// Retrieve the directory from the stream following
	BackupStoreDirectory dir;
	std::auto_ptr<IOStream> dirstream(mrConnection.ReceiveStream());
	dir.ReadFromStream(*dirstream, mrConnection.GetTimeout());

	// Then... display everything
	BackupStoreDirectory::Iterator i(dir);
	BackupStoreDirectory::Entry *en = 0;
	while((en = i.Next()) != 0)
	{
		// Display this entry
		BackupStoreFilenameClear clear(en->GetName());
		std::string line;

		// Object ID? (shown unless -I given)
		if(!opts[LIST_OPTION_NOOBJECTID])
		{
			// add object ID to line
			char oid[32];
			sprintf(oid, "%08llx ", en->GetObjectID());
			line += oid;
		}

		// Flags? (shown unless -F given)
		if(!opts[LIST_OPTION_NOFLAGS])
		{
			static const char *flags = BACKUPSTOREDIRECTORY_ENTRY_FLAGS_DISPLAY_NAMES;
			char displayflags[16];
			// make sure f is big enough
			ASSERT(sizeof(displayflags) >= sizeof(BACKUPSTOREDIRECTORY_ENTRY_FLAGS_DISPLAY_NAMES) + 3);
			// Insert flags: one display character per flag bit, '-' when
			// the bit is clear; bits are consumed LSB first
			char *f = displayflags;
			const char *t = flags;
			int16_t en_flags = en->GetFlags();
			while(*t != 0)
			{
				*f = ((en_flags&1) == 0)?'-':*t;
				en_flags >>= 1;
				f++;
				t++;
			}
			// attributes flags
			*(f++) = (en->HasAttributes())?'a':'-';
			// terminate
			*(f++) = ' ';
			*(f++) = '\0';
			line += displayflags;
			// Any bits left after shifting out the known ones are bogus
			if(en_flags != 0)
			{
				line += "[ERROR: Entry has additional flags set] ";
			}
		}

		if(opts[LIST_OPTION_TIMES])
		{
			// Show times... (-t option)
			line += BoxTimeToISO8601String(en->GetModificationTime());
			line += ' ';
		}

		if(opts[LIST_OPTION_DISPLAY_HASH])
		{
			// -h option: attributes hash
			char hash[64];
			::sprintf(hash, "%016llx ", en->GetAttributesHash());
			line += hash;
		}

		if(opts[LIST_OPTION_SIZEINBLOCKS])
		{
			// -s option: size in blocks, not bytes
			char num[32];
			sprintf(num, "%05lld ", en->GetSizeInBlocks());
			line += num;
		}

		// add name, prefixed with the accumulated path when recursing
		if(!FirstLevel)
		{
			line += rListRoot;
			line += '/';
		}
		line += clear.GetClearFilename().c_str();

		// Warn if the stored filename wasn't encrypted
		if(!en->GetName().IsEncrypted())
		{
			line += "[FILENAME NOT ENCRYPTED]";
		}

		// print line
		printf("%s\n", line.c_str());

		// Directory?
		if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) != 0)
		{
			// Recurse? (-r option)
			if(opts[LIST_OPTION_RECURSIVE])
			{
				std::string subroot(rListRoot);
				if(!FirstLevel) subroot += '/';
				subroot += clear.GetClearFilename();
				List(en->GetObjectID(), subroot, opts, false /* not the first level to list */);
			}
		}
	}
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::FindDirectoryObjectID(const std::string &)
+// Purpose: Find the object ID of a directory on the store, or return 0 for not found.
+// If pStack != 0, the object is set to the stack of directories.
+// Will start from the current directory stack.
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
// Resolve a '/'-separated path (absolute if it starts with '/', otherwise
// relative to the current directory stack) to a directory object ID.
// Returns 0 if any component is not found. "." is ignored, ".." pops the
// stack. If pStack != 0 the resolved stack of (name, ID) pairs is copied
// out -- used by cd to commit the new current directory.
int64_t BackupQueries::FindDirectoryObjectID(const std::string &rDirName, bool AllowOldVersion,
		bool AllowDeletedDirs, std::vector<std::pair<std::string, int64_t> > *pStack)
{
	// Split up string into elements
	std::vector<std::string> dirElements;
	SplitString(rDirName, DIRECTORY_SEPARATOR_ASCHAR, dirElements);

	// Start from current stack, or root, whichever is required
	std::vector<std::pair<std::string, int64_t> > stack;
	int64_t dirID = BackupProtocolClientListDirectory::RootDirectory;
	if(rDirName.size() > 0 && rDirName[0] == '/')
	{
		// Root, do nothing -- stack stays empty
	}
	else
	{
		// Copy existing stack
		stack = mDirStack;
		if(stack.size() > 0)
		{
			dirID = stack[stack.size() - 1].second;
		}
	}

	// Generate exclude flags
	int16_t excludeFlags = BackupProtocolClientListDirectory::Flags_EXCLUDE_NOTHING;
	if(!AllowOldVersion) excludeFlags |= BackupProtocolClientListDirectory::Flags_OldVersion;
	if(!AllowDeletedDirs) excludeFlags |= BackupProtocolClientListDirectory::Flags_Deleted;

	// Read directories, walking down the path one element at a time
	for(unsigned int e = 0; e < dirElements.size(); ++e)
	{
		if(dirElements[e].size() > 0)
		{
			if(dirElements[e] == ".")
			{
				// Ignore.
			}
			else if(dirElements[e] == "..")
			{
				// Up one!
				if(stack.size() > 0)
				{
					// Remove top element
					stack.pop_back();

					// New dir ID
					dirID = (stack.size() > 0)?(stack[stack.size() - 1].second):BackupProtocolClientListDirectory::RootDirectory;
				}
				else
				{
					// At root anyway
					dirID = BackupProtocolClientListDirectory::RootDirectory;
				}
			}
			else
			{
				// Not blank element. Read current directory.
				std::auto_ptr<BackupProtocolClientSuccess> dirreply(mrConnection.QueryListDirectory(
						dirID,
						BackupProtocolClientListDirectory::Flags_Dir, // just directories
						excludeFlags,
						true /* want attributes */));

				// Retrieve the directory from the stream following
				BackupStoreDirectory dir;
				std::auto_ptr<IOStream> dirstream(mrConnection.ReceiveStream());
				dir.ReadFromStream(*dirstream, mrConnection.GetTimeout());

				// Then... find the directory within it
				BackupStoreDirectory::Iterator i(dir);
				BackupStoreFilenameClear dirname(dirElements[e]);
				BackupStoreDirectory::Entry *en = i.FindMatchingClearName(dirname);
				if(en == 0)
				{
					// Not found
					return 0;
				}

				// Object ID for next round of searching
				dirID = en->GetObjectID();

				// Push onto stack
				stack.push_back(std::pair<std::string, int64_t>(dirElements[e], dirID));
			}
		}
	}

	// If required, copy the new stack to the caller
	if(pStack)
	{
		*pStack = stack;
	}

	return dirID;
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::GetCurrentDirectoryID()
+// Purpose: Returns the ID of the current directory
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
+int64_t BackupQueries::GetCurrentDirectoryID()
+{
+ // Special case for root
+ if(mDirStack.size() == 0)
+ {
+ return BackupProtocolClientListDirectory::RootDirectory;
+ }
+
+ // Otherwise, get from the last entry on the stack
+ return mDirStack[mDirStack.size() - 1].second;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::GetCurrentDirectoryName()
+// Purpose: Gets the name of the current directory
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
+std::string BackupQueries::GetCurrentDirectoryName()
+{
+ // Special case for root
+ if(mDirStack.size() == 0)
+ {
+ return std::string("/");
+ }
+
+ // Build path
+ std::string r;
+ for(unsigned int l = 0; l < mDirStack.size(); ++l)
+ {
+ r += "/";
+ r += mDirStack[l].first;
+ }
+
+ return r;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandChangeDir(const std::vector<std::string> &)
+// Purpose: Change directory command
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandChangeDir(const std::vector<std::string> &args, const bool *opts)
+{
+ if(args.size() != 1 || args[0].size() == 0)
+ {
+ printf("Incorrect usage.\ncd [-o] [-d] <directory>\n");
+ return;
+ }
+
+ std::vector<std::pair<std::string, int64_t> > newStack;
+ int64_t id = FindDirectoryObjectID(args[0], opts['o'], opts['d'], &newStack);
+
+ if(id == 0)
+ {
+ printf("Directory '%s' not found\n", args[0].c_str());
+ return;
+ }
+
+ // Store new stack
+ mDirStack = newStack;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandChangeLocalDir(const std::vector<std::string> &)
+// Purpose: Change local directory command
+// Created: 2003/10/11
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandChangeLocalDir(const std::vector<std::string> &args)
+{
+ if(args.size() != 1 || args[0].size() == 0)
+ {
+ printf("Incorrect usage.\nlcd <local-directory>\n");
+ return;
+ }
+
+ // Try changing directory
+ if(::chdir(args[0].c_str()) != 0)
+ {
+ printf((errno == ENOENT || errno == ENOTDIR)?"Directory '%s' does not exist\n":"Error changing dir to '%s'\n",
+ args[0].c_str());
+ return;
+ }
+
+ // Report current dir
+ char wd[PATH_MAX];
+ if(::getcwd(wd, PATH_MAX) == 0)
+ {
+ printf("Error getting current directory\n");
+ return;
+ }
+
+ printf("Local current directory is now '%s'\n", wd);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandGetObject(const std::vector<std::string> &, const bool *)
+// Purpose: Gets an object without any translation.
+// Created: 2003/10/11
+//
+// --------------------------------------------------------------------------
// "getobject" command: fetch a raw (still encoded/encrypted) object from
// the store by hex ID and write it to a local file. Refuses to overwrite
// an existing local file; removes the partial file on failure.
void BackupQueries::CommandGetObject(const std::vector<std::string> &args, const bool *opts)
{
	// Check args
	if(args.size() != 2)
	{
		printf("Incorrect usage.\ngetobject <object-id> <local-filename>\n");
		return;
	}

	// Parse object ID as hex. NOTE(review): this only rejects 0 and the
	// extreme clamp values; strtoll's endptr/errno are not checked, so
	// some malformed input may slip through -- matches CommandGet.
	int64_t id = ::strtoll(args[0].c_str(), 0, 16);
	if(id == LLONG_MIN || id == LLONG_MAX || id == 0)
	{
		printf("Not a valid object ID (specified in hex)\n");
		return;
	}

	// Does file exist? (stat succeeding, or failing for any reason other
	// than "not there", means we must not create it)
	struct stat st;
	if(::stat(args[1].c_str(), &st) == 0 || errno != ENOENT)
	{
		printf("The local file %s already exists\n", args[1].c_str());
		return;
	}

	// Open file -- O_EXCL guards against a race with the stat above
	FileStream out(args[1].c_str(), O_WRONLY | O_CREAT | O_EXCL);

	// Request that object
	try
	{
		// Request object
		std::auto_ptr<BackupProtocolClientSuccess> getobj(mrConnection.QueryGetObject(id));
		if(getobj->GetObjectID() != BackupProtocolClientGetObject::NoObject)
		{
			// Stream that object out to the file
			std::auto_ptr<IOStream> objectStream(mrConnection.ReceiveStream());
			objectStream->CopyStreamTo(out);

			printf("Object ID %08llx fetched successfully.\n", id);
		}
		else
		{
			// Server says no such object -- remove the empty file we made
			printf("Object does not exist on store.\n");
			::unlink(args[1].c_str());
		}
	}
	catch(...)
	{
		// Best-effort cleanup of the partial download, then report;
		// the exception is deliberately swallowed to keep the session alive
		::unlink(args[1].c_str());
		printf("Error occured fetching object.\n");
	}
}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandGet(const std::vector<std::string> &, const bool *)
+// Purpose: Command to get a file from the store
+// Created: 2003/10/12
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandGet(const std::vector<std::string> &args, const bool *opts)
+{
+ // At least one argument?
+ // Check args
+ if(args.size() < 1 || (opts['i'] && args.size() != 2) || args.size() > 2)
+ {
+ printf("Incorrect usage.\ngetobject <object-id> <local-filename>\n or get -i <object-id> <local-filename>\n");
+ return;
+ }
+
+ // Find object ID somehow
+ int64_t id;
+ std::string localName;
+ // BLOCK
+ {
+ // Need to look it up in the current directory
+ mrConnection.QueryListDirectory(
+ GetCurrentDirectoryID(),
+ BackupProtocolClientListDirectory::Flags_File, // just files
+ (opts['i'])?(BackupProtocolClientListDirectory::Flags_EXCLUDE_NOTHING):(BackupProtocolClientListDirectory::Flags_OldVersion | BackupProtocolClientListDirectory::Flags_Deleted), // only current versions
+ false /* don't want attributes */);
+
+ // Retrieve the directory from the stream following
+ BackupStoreDirectory dir;
+ std::auto_ptr<IOStream> dirstream(mrConnection.ReceiveStream());
+ dir.ReadFromStream(*dirstream, mrConnection.GetTimeout());
+
+ if(opts['i'])
+ {
+ // Specified as ID.
+ id = ::strtoll(args[0].c_str(), 0, 16);
+ if(id == LLONG_MIN || id == LLONG_MAX || id == 0)
+ {
+ printf("Not a valid object ID (specified in hex)\n");
+ return;
+ }
+
+ // Check that the item is actually in the directory
+ if(dir.FindEntryByID(id) == 0)
+ {
+ printf("ID '%08llx' not found in current directory on store.\n(You can only download objects by ID from the current directory.)\n", id);
+ return;
+ }
+
+ // Must have a local name in the arguments (check at beginning of function ensures this)
+ localName = args[1];
+ }
+ else
+ {
+ // Specified by name, find the object in the directory to get the ID
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreFilenameClear fn(args[0]);
+ BackupStoreDirectory::Entry *en = i.FindMatchingClearName(fn);
+
+ if(en == 0)
+ {
+ printf("Filename '%s' not found in current directory on store.\n(Subdirectories in path not searched.)\n", args[0].c_str());
+ return;
+ }
+
+ id = en->GetObjectID();
+
+ // Local name is the last argument, which is either the looked up filename, or
+ // a filename specified by the user.
+ localName = args[args.size() - 1];
+ }
+ }
+
+ // Does local file already exist? (don't want to overwrite)
+ struct stat st;
+ if(::stat(localName.c_str(), &st) == 0 || errno != ENOENT)
+ {
+ printf("The local file %s already exists, will not overwrite it.\n", localName.c_str());
+ return;
+ }
+
+ // Request it from the store
+ try
+ {
+ // Request object
+ mrConnection.QueryGetFile(GetCurrentDirectoryID(), id);
+
+ // Stream containing encoded file
+ std::auto_ptr<IOStream> objectStream(mrConnection.ReceiveStream());
+
+ // Decode it
+ BackupStoreFile::DecodeFile(*objectStream, localName.c_str(), mrConnection.GetTimeout());
+
+ // Done.
+ printf("Object ID %08llx fetched sucessfully.\n", id);
+ }
+ catch(...)
+ {
+ ::unlink(args[1].c_str());
+ printf("Error occured fetching file.\n");
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CompareParams::CompareParams()
+// Purpose: Constructor
+// Created: 29/1/04
+//
+// --------------------------------------------------------------------------
// Constructor: initialise all counters to zero and all options to off.
BackupQueries::CompareParams::CompareParams()
	: mQuickCompare(false),			// -q option: attributes are not checked
	mIgnoreExcludes(false),			// -E option: ignore configured exclude lists
	mDifferences(0),				// running total of differences found
	mDifferencesExplainedByModTime(0),	// subset probably due to changes after the last upload
	mExcludedDirs(0),				// directories skipped because of exclude lists
	mExcludedFiles(0),				// files skipped because of exclude lists
	mpExcludeFiles(0),				// owned; freed by DeleteExcludeLists()
	mpExcludeDirs(0),				// owned; freed by DeleteExcludeLists()
	mLatestFileUploadTime(0)		// files modified after this may legitimately differ
{
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CompareParams::~CompareParams()
+// Purpose: Destructor
+// Created: 29/1/04
+//
+// --------------------------------------------------------------------------
// Destructor: make sure any owned exclude lists are freed.
BackupQueries::CompareParams::~CompareParams()
{
	DeleteExcludeLists();
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CompareParams::DeleteExcludeLists()
+// Purpose: Delete the include lists contained
+// Created: 29/1/04
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CompareParams::DeleteExcludeLists()
+{
+ if(mpExcludeFiles != 0)
+ {
+ delete mpExcludeFiles;
+ mpExcludeFiles = 0;
+ }
+ if(mpExcludeDirs != 0)
+ {
+ delete mpExcludeDirs;
+ mpExcludeDirs = 0;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandCompare(const std::vector<std::string> &, const bool *)
+// Purpose: Command to compare data on the store with local data
+// Created: 2003/10/12
+//
+// --------------------------------------------------------------------------
// "compare" command: compare store contents against local files.
// Forms: compare -a (all locations), compare -l <location>,
// compare <store-dir> <local-dir>. -q = quick, -E = ignore excludes,
// -c = set the process return code from the result.
void BackupQueries::CommandCompare(const std::vector<std::string> &args, const bool *opts)
{
	// Parameters, including count of differences
	BackupQueries::CompareParams params;
	params.mQuickCompare = opts['q'];
	params.mIgnoreExcludes = opts['E'];

	// Try and work out the time before which all files should be on the server
	{
		std::string syncTimeFilename(mrConfiguration.GetKeyValue("DataDirectory") + DIRECTORY_SEPARATOR_ASCHAR);
		syncTimeFilename += "last_sync_start";
		// Stat it to get file time -- bbackupd touches this file when a sync starts
		struct stat st;
		if(::stat(syncTimeFilename.c_str(), &st) == 0)
		{
			// Files modified after this time shouldn't be on the server, so report errors slightly differently.
			// Subtract MinimumFileAge because files younger than that won't have been uploaded yet anyway.
			params.mLatestFileUploadTime = FileModificationTime(st)
					- SecondsToBoxTime((uint32_t)mrConfiguration.GetKeyValueInt("MinimumFileAge"));
		}
		else
		{
			printf("Warning: couldn't determine the time of the last syncronisation -- checks not performed.\n");
		}
	}

	// Quick compare?
	if(params.mQuickCompare)
	{
		printf("WARNING: Quick compare used -- file attributes are not checked.\n");
	}

	// Dispatch on the three mutually-exclusive usage forms
	if(!opts['l'] && opts['a'] && args.size() == 0)
	{
		// Compare all locations
		const Configuration &locations(mrConfiguration.GetSubConfiguration("BackupLocations"));
		for(std::list<std::pair<std::string, Configuration> >::const_iterator i = locations.mSubConfigurations.begin();
				i != locations.mSubConfigurations.end(); ++i)
		{
			CompareLocation(i->first, params);
		}
	}
	else if(opts['l'] && !opts['a'] && args.size() == 1)
	{
		// Compare one location
		CompareLocation(args[0], params);
	}
	else if(!opts['l'] && !opts['a'] && args.size() == 2)
	{
		// Compare directory to directory

		// Can't be bothered to do all the hard work to work out which location it's on, and hence which exclude list
		if(!params.mIgnoreExcludes)
		{
			printf("Cannot use excludes on directory to directory comparison -- use -E flag to specify ignored excludes\n");
			return;
		}
		else
		{
			// Do compare
			Compare(args[0], args[1], params);
		}
	}
	else
	{
		printf("Incorrect usage.\ncompare -a\n or compare -l <location-name>\n or compare <store-dir-name> <local-dir-name>\n");
		return;
	}

	// Summary of what was found
	printf("\n[ %d (of %d) differences probably due to file modifications after the last upload ]\nDifferences: %d (%d dirs excluded, %d files excluded)\n",
			params.mDifferencesExplainedByModTime, params.mDifferences, params.mDifferences, params.mExcludedDirs, params.mExcludedFiles);

	// Set return code? (-c option, for use from scripts)
	if(opts['c'])
	{
		SetReturnCode((params.mDifferences == 0)?COMPARE_RETURN_SAME:COMPARE_RETURN_DIFFERENT);
	}
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CompareLocation(const std::string &, BackupQueries::CompareParams &)
+// Purpose: Compare a location
+// Created: 2003/10/13
+//
+// --------------------------------------------------------------------------
// Compare a single configured backup location: build its exclude lists
// (unless -E), then compare the store path "/<location>" against the
// location's local Path. Exclude lists are always cleaned up, even if
// the comparison throws.
void BackupQueries::CompareLocation(const std::string &rLocation, BackupQueries::CompareParams &rParams)
{
	// Find the location's sub configuration
	const Configuration &locations(mrConfiguration.GetSubConfiguration("BackupLocations"));
	if(!locations.SubConfigurationExists(rLocation.c_str()))
	{
		printf("Location %s does not exist.\n", rLocation.c_str());
		return;
	}
	const Configuration &loc(locations.GetSubConfiguration(rLocation.c_str()));

	try
	{
		// Generate the exclude lists (ownership passes to rParams,
		// released by DeleteExcludeLists below)
		if(!rParams.mIgnoreExcludes)
		{
			rParams.mpExcludeFiles = BackupClientMakeExcludeList_Files(loc);
			rParams.mpExcludeDirs = BackupClientMakeExcludeList_Dirs(loc);
		}

		// Then get it compared
		Compare(std::string("/") + rLocation, loc.GetKeyValue("Path"), rParams);
	}
	catch(...)
	{
		// Clean up before letting the exception propagate
		rParams.DeleteExcludeLists();
		throw;
	}

	// Delete exclude lists
	rParams.DeleteExcludeLists();
}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::Compare(const std::string &, const std::string &, BackupQueries::CompareParams &)
+// Purpose: Compare a store directory against a local directory
+// Created: 2003/10/13
+//
+// --------------------------------------------------------------------------
+void BackupQueries::Compare(const std::string &rStoreDir, const std::string &rLocalDir, BackupQueries::CompareParams &rParams)
+{
+ // Get the directory ID of the directory -- only use current data
+ int64_t dirID = FindDirectoryObjectID(rStoreDir);
+
+ // Found?
+ if(dirID == 0)
+ {
+ printf("Local directory '%s' exists, but server directory '%s' does not exist\n", rLocalDir.c_str(), rStoreDir.c_str());
+ rParams.mDifferences ++;
+ return;
+ }
+
+ // Go!
+ Compare(dirID, rStoreDir, rLocalDir, rParams);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::Compare(int64_t, const std::string &, BackupQueries::CompareParams &)
+// Purpose: Compare a store directory against a local directory
+// Created: 2003/10/13
+//
+// --------------------------------------------------------------------------
+void BackupQueries::Compare(int64_t DirID, const std::string &rStoreDir, const std::string &rLocalDir, BackupQueries::CompareParams &rParams)
+{
+ // Get info on the local directory
+ struct stat st;
+ if(::lstat(rLocalDir.c_str(), &st) != 0)
+ {
+ // What kind of error?
+ if(errno == ENOTDIR)
+ {
+ printf("Local object '%s' is a file, server object '%s' is a directory\n", rLocalDir.c_str(), rStoreDir.c_str());
+ rParams.mDifferences ++;
+ }
+ else if(errno == ENOENT)
+ {
+ printf("Local directory '%s' does not exist (compared to server directory '%s')\n", rLocalDir.c_str(), rStoreDir.c_str());
+ }
+ else
+ {
+ printf("ERROR: stat on local dir '%s'\n", rLocalDir.c_str());
+ }
+ return;
+ }
+
+ // Get the directory listing from the store
+ mrConnection.QueryListDirectory(
+ DirID,
+ BackupProtocolClientListDirectory::Flags_INCLUDE_EVERYTHING, // get everything
+ BackupProtocolClientListDirectory::Flags_OldVersion | BackupProtocolClientListDirectory::Flags_Deleted, // except for old versions and deleted files
+ true /* want attributes */);
+
+ // Retrieve the directory from the stream following
+ BackupStoreDirectory dir;
+ std::auto_ptr<IOStream> dirstream(mrConnection.ReceiveStream());
+ dir.ReadFromStream(*dirstream, mrConnection.GetTimeout());
+
+ // Test out the attributes
+ if(!dir.HasAttributes())
+ {
+ printf("Store directory '%s' doesn't have attributes.\n", rStoreDir.c_str());
+ }
+ else
+ {
+ // Fetch the attributes
+ const StreamableMemBlock &storeAttr(dir.GetAttributes());
+ BackupClientFileAttributes attr(storeAttr);
+
+ // Get attributes of local directory
+ BackupClientFileAttributes localAttr;
+ localAttr.ReadAttributes(rLocalDir.c_str(), true /* directories have zero mod times */);
+
+ if(!(attr.Compare(localAttr, true, true /* ignore modification times */)))
+ {
+ printf("Local directory '%s' has different attributes to store directory '%s'.\n",
+ rLocalDir.c_str(), rStoreDir.c_str());
+ rParams.mDifferences ++;
+ }
+ }
+
+ // Open the local directory
+ DIR *dirhandle = ::opendir(rLocalDir.c_str());
+ if(dirhandle == 0)
+ {
+ printf("ERROR: opendir on local dir '%s'\n", rLocalDir.c_str());
+ return;
+ }
+ try
+ {
+ // Read the files and directories into sets
+ std::set<std::string> localFiles;
+ std::set<std::string> localDirs;
+ struct dirent *localDirEn = 0;
+ while((localDirEn = readdir(dirhandle)) != 0)
+ {
+ // Not . and ..!
+ if(localDirEn->d_name[0] == '.' &&
+ (localDirEn->d_name[1] == '\0' || (localDirEn->d_name[1] == '.' && localDirEn->d_name[2] == '\0')))
+ {
+ // ignore, it's . or ..
+ continue;
+ }
+
+#ifdef PLATFORM_dirent_BROKEN_d_type
+ std::string fn(rLocalDir);
+ fn += '/';
+ fn += localDirEn->d_name;
+ struct stat st;
+ if(::lstat(fn.c_str(), &st) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+
+ // Entry -- file or dir?
+ if(S_ISREG(st.st_mode) || S_ISLNK(st.st_mode))
+ {
+ // File or symbolic link
+ localFiles.insert(std::string(localDirEn->d_name));
+ }
+ else if(S_ISDIR(st.st_mode))
+ {
+ // Directory
+ localDirs.insert(std::string(localDirEn->d_name));
+ }
+#else
+ // Entry -- file or dir?
+ if(localDirEn->d_type == DT_REG || localDirEn->d_type == DT_LNK)
+ {
+ // File or symbolic link
+ localFiles.insert(std::string(localDirEn->d_name));
+ }
+ else if(localDirEn->d_type == DT_DIR)
+ {
+ // Directory
+ localDirs.insert(std::string(localDirEn->d_name));
+ }
+#endif // PLATFORM_dirent_BROKEN_d_type
+ }
+ // Close directory
+ if(::closedir(dirhandle) != 0)
+ {
+ printf("ERROR: closedir on local dir '%s'\n", rLocalDir.c_str());
+ }
+ dirhandle = 0;
+
+ // Do the same for the store directories
+ std::set<std::pair<std::string, BackupStoreDirectory::Entry *> > storeFiles;
+ std::set<std::pair<std::string, BackupStoreDirectory::Entry *> > storeDirs;
+
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *storeDirEn = 0;
+ while((storeDirEn = i.Next()) != 0)
+ {
+ // Decrypt filename
+ BackupStoreFilenameClear name(storeDirEn->GetName());
+
+ // What is it?
+ if((storeDirEn->GetFlags() & BackupStoreDirectory::Entry::Flags_File) == BackupStoreDirectory::Entry::Flags_File)
+ {
+ // File
+ storeFiles.insert(std::pair<std::string, BackupStoreDirectory::Entry *>(name.GetClearFilename(), storeDirEn));
+ }
+ else
+ {
+ // Dir
+ storeDirs.insert(std::pair<std::string, BackupStoreDirectory::Entry *>(name.GetClearFilename(), storeDirEn));
+ }
+ }
+
+ // Now compare files.
+ for(std::set<std::pair<std::string, BackupStoreDirectory::Entry *> >::const_iterator i = storeFiles.begin(); i != storeFiles.end(); ++i)
+ {
+ // Does the file exist locally?
+ std::set<std::string>::const_iterator local(localFiles.find(i->first));
+ if(local == localFiles.end())
+ {
+ // Not found -- report
+ printf("Local file '%s/%s' does not exist, but store file '%s/%s' does.\n",
+ rLocalDir.c_str(), i->first.c_str(), rStoreDir.c_str(), i->first.c_str());
+ rParams.mDifferences ++;
+ }
+ else
+ {
+ try
+ {
+ // make local name of file for comparison
+ std::string localName(rLocalDir + DIRECTORY_SEPARATOR + i->first);
+
+ // Files the same flag?
+ bool equal = true;
+
+ // File modified after last sync flag
+ bool modifiedAfterLastSync = false;
+
+ if(rParams.mQuickCompare)
+ {
+ // Compare file -- fetch it
+ mrConnection.QueryGetBlockIndexByID(i->second->GetObjectID());
+
+ // Stream containing block index
+ std::auto_ptr<IOStream> blockIndexStream(mrConnection.ReceiveStream());
+
+ // Compare
+ equal = BackupStoreFile::CompareFileContentsAgainstBlockIndex(localName.c_str(), *blockIndexStream, mrConnection.GetTimeout());
+ }
+ else
+ {
+ // Compare file -- fetch it
+ mrConnection.QueryGetFile(DirID, i->second->GetObjectID());
+
+ // Stream containing encoded file
+ std::auto_ptr<IOStream> objectStream(mrConnection.ReceiveStream());
+
+ // Decode it
+ std::auto_ptr<BackupStoreFile::DecodedStream> fileOnServerStream;
+ // Got additional attibutes?
+ if(i->second->HasAttributes())
+ {
+ // Use these attributes
+ const StreamableMemBlock &storeAttr(i->second->GetAttributes());
+ BackupClientFileAttributes attr(storeAttr);
+ fileOnServerStream.reset(BackupStoreFile::DecodeFileStream(*objectStream, mrConnection.GetTimeout(), &attr).release());
+ }
+ else
+ {
+ // Use attributes stored in file
+ fileOnServerStream.reset(BackupStoreFile::DecodeFileStream(*objectStream, mrConnection.GetTimeout()).release());
+ }
+
+ // Should always be something in the auto_ptr, it's how the interface is defined. But be paranoid.
+ if(!fileOnServerStream.get())
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ // Compare attributes
+ BackupClientFileAttributes localAttr;
+ box_time_t fileModTime = 0;
+ localAttr.ReadAttributes(localName.c_str(), false /* don't zero mod times */, &fileModTime);
+ modifiedAfterLastSync = (fileModTime > rParams.mLatestFileUploadTime);
+ if(!localAttr.Compare(fileOnServerStream->GetAttributes(),
+ true /* ignore attr mod time */,
+ fileOnServerStream->IsSymLink() /* ignore modification time if it's a symlink */))
+ {
+ printf("Local file '%s/%s' has different attributes to store file '%s/%s'.\n",
+ rLocalDir.c_str(), i->first.c_str(), rStoreDir.c_str(), i->first.c_str());
+ rParams.mDifferences ++;
+ if(modifiedAfterLastSync)
+ {
+ rParams.mDifferencesExplainedByModTime ++;
+ printf("(the file above was modified after the last sync time -- might be reason for difference)\n");
+ }
+ else if(i->second->HasAttributes())
+ {
+ printf("(the file above has had new attributes applied)\n");
+ }
+ }
+
+ // Compare contents, if it's a regular file not a link
+ // Remember, we MUST read the entire stream from the server.
+ if(!fileOnServerStream->IsSymLink())
+ {
+ // Open the local file
+ FileStream l(localName.c_str());
+
+ // Size
+ IOStream::pos_type fileSizeLocal = l.BytesLeftToRead();
+ IOStream::pos_type fileSizeServer = 0;
+
+ // Test the contents
+ char buf1[2048];
+ char buf2[2048];
+ while(fileOnServerStream->StreamDataLeft() && l.StreamDataLeft())
+ {
+ int size = fileOnServerStream->Read(buf1, sizeof(buf1), mrConnection.GetTimeout());
+ fileSizeServer += size;
+
+ if(l.Read(buf2, size) != size
+ || ::memcmp(buf1, buf2, size) != 0)
+ {
+ equal = false;
+ break;
+ }
+ }
+
+ // Check read all the data from the server and file -- can't be equal if local and remote aren't the same length
+ // Can't use StreamDataLeft() test on file, because if it's the same size, it won't know
+ // it's EOF yet.
+ if(fileOnServerStream->StreamDataLeft() || fileSizeServer != fileSizeLocal)
+ {
+ equal = false;
+ }
+
+ // Must always read the entire decoded string, if it's not a symlink
+ if(fileOnServerStream->StreamDataLeft())
+ {
+ // Absorb all the data remaining
+ char buffer[2048];
+ while(fileOnServerStream->StreamDataLeft())
+ {
+ fileOnServerStream->Read(buffer, sizeof(buffer), mrConnection.GetTimeout());
+ }
+ }
+ }
+ }
+
+ // Report if not equal.
+ if(!equal)
+ {
+ printf("Local file '%s/%s' has different contents to store file '%s/%s'.\n",
+ rLocalDir.c_str(), i->first.c_str(), rStoreDir.c_str(), i->first.c_str());
+ rParams.mDifferences ++;
+ if(modifiedAfterLastSync)
+ {
+ rParams.mDifferencesExplainedByModTime ++;
+ printf("(the file above was modified after the last sync time -- might be reason for difference)\n");
+ }
+ else if(i->second->HasAttributes())
+ {
+ printf("(the file above has had new attributes applied)\n");
+ }
+ }
+ }
+ catch(BoxException &e)
+ {
+ printf("ERROR: (%d/%d) during file fetch and comparsion for '%s/%s'\n",
+ e.GetType(),
+ e.GetSubType(),
+ rStoreDir.c_str(), i->first.c_str());
+ }
+ catch(...)
+ {
+ printf("ERROR: (unknown) during file fetch and comparsion for '%s/%s'\n", rStoreDir.c_str(), i->first.c_str());
+ }
+
+ // Remove from set so that we know it's been compared
+ localFiles.erase(local);
+ }
+ }
+
+ // Report any files which exist on the locally, but not on the store
+ for(std::set<std::string>::const_iterator i = localFiles.begin(); i != localFiles.end(); ++i)
+ {
+ std::string localName(rLocalDir + DIRECTORY_SEPARATOR + *i);
+ // Should this be ignored (ie is excluded)?
+ if(rParams.mpExcludeFiles == 0 || !(rParams.mpExcludeFiles->IsExcluded(localName)))
+ {
+ printf("Local file '%s/%s' exists, but store file '%s/%s' does not exist.\n",
+ rLocalDir.c_str(), (*i).c_str(), rStoreDir.c_str(), (*i).c_str());
+ rParams.mDifferences ++;
+
+ // Check the file modification time
+ {
+ struct stat st;
+ if(::stat(localName.c_str(), &st) == 0)
+ {
+ if(FileModificationTime(st) > rParams.mLatestFileUploadTime)
+ {
+ rParams.mDifferencesExplainedByModTime ++;
+ printf("(the file above was modified after the last sync time -- might be reason for difference)\n");
+ }
+ }
+ }
+ }
+ else
+ {
+ rParams.mExcludedFiles ++;
+ }
+ }
+
+ // Finished with the files, clear the sets to reduce memory usage slightly
+ localFiles.clear();
+ storeFiles.clear();
+
+ // Now do the directories, recusively to check subdirectories
+ for(std::set<std::pair<std::string, BackupStoreDirectory::Entry *> >::const_iterator i = storeDirs.begin(); i != storeDirs.end(); ++i)
+ {
+ // Does the directory exist locally?
+ std::set<std::string>::const_iterator local(localDirs.find(i->first));
+ if(local == localDirs.end())
+ {
+ // Not found -- report
+ printf("Local directory '%s/%s' does not exist, but store directory '%s/%s' does.\n",
+ rLocalDir.c_str(), i->first.c_str(), rStoreDir.c_str(), i->first.c_str());
+ rParams.mDifferences ++;
+ }
+ else
+ {
+ // Compare directory
+ Compare(i->second->GetObjectID(), rStoreDir + "/" + i->first, rLocalDir + DIRECTORY_SEPARATOR + i->first, rParams);
+
+ // Remove from set so that we know it's been compared
+ localDirs.erase(local);
+ }
+ }
+
+ // Report any files which exist on the locally, but not on the store
+ for(std::set<std::string>::const_iterator i = localDirs.begin(); i != localDirs.end(); ++i)
+ {
+ std::string localName(rLocalDir + DIRECTORY_SEPARATOR + *i);
+ // Should this be ignored (ie is excluded)?
+ if(rParams.mpExcludeDirs == 0 || !(rParams.mpExcludeDirs->IsExcluded(localName)))
+ {
+ printf("Local directory '%s/%s' exists, but store directory '%s/%s' does not exist.\n",
+ rLocalDir.c_str(), (*i).c_str(), rStoreDir.c_str(), (*i).c_str());
+ rParams.mDifferences ++;
+ }
+ else
+ {
+ rParams.mExcludedDirs ++;
+ }
+ }
+
+ }
+ catch(...)
+ {
+ if(dirhandle != 0)
+ {
+ ::closedir(dirhandle);
+ }
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandRestore(const std::vector<std::string> &, const bool *)
+// Purpose: Restore a directory
+// Created: 23/11/03
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandRestore(const std::vector<std::string> &args, const bool *opts)
+{
+	// Check arguments: exactly a store directory name (or object ID with -i)
+	// and a local target directory
+	if(args.size() != 2)
+	{
+		printf("Incorrect usage.\nrestore [-d] [-r] [-i] <directory-name> <local-directory-name>\n");
+		return;
+	}
+
+	// Restoring deleted things?
+	bool restoreDeleted = opts['d'];
+
+	// Get directory ID
+	int64_t dirID = 0;
+	if(opts['i'])
+	{
+		// Specified as ID.
+		// strtoll returns LLONG_MIN/LLONG_MAX on overflow; 0 is returned
+		// on parse failure and is never a valid object ID, so all three
+		// are rejected here.
+		dirID = ::strtoll(args[0].c_str(), 0, 16);
+		if(dirID == LLONG_MIN || dirID == LLONG_MAX || dirID == 0)
+		{
+			printf("Not a valid object ID (specified in hex)\n");
+			return;
+		}
+	}
+	else
+	{
+		// Look up directory ID by name; deleted directories are only
+		// considered when -d was given.
+		dirID = FindDirectoryObjectID(args[0], false /* no old versions */, restoreDeleted /* find deleted dirs */);
+	}
+
+	// Allowable?
+	if(dirID == 0)
+	{
+		printf("Directory %s not found on server\n", args[0].c_str());
+		return;
+	}
+	if(dirID == BackupProtocolClientListDirectory::RootDirectory)
+	{
+		// Restoring the whole root is not supported -- each location
+		// must be restored separately.
+		printf("Cannot restore the root directory -- restore locations individually.\n");
+		return;
+	}
+
+	// Go and restore... BackupClientRestore does the actual transfer and
+	// returns a status code which is reported to the user below.
+	switch(BackupClientRestore(mrConnection, dirID, args[1].c_str(), true /* print progress dots */, restoreDeleted,
+		false /* don't undelete after restore! */, opts['r'] /* resume? */))
+	{
+	case Restore_Complete:
+		printf("Restore complete\n");
+		break;
+	
+	case Restore_ResumePossible:
+		// An interrupted restore left resume information on disc
+		printf("Resume possible -- repeat command with -r flag to resume\n");
+		break;
+	
+	case Restore_TargetExists:
+		printf("The target directory exists. You cannot restore over an existing directory.\n");
+		break;
+		
+	default:
+		printf("ERROR: Unknown restore result.\n");
+		break;
+	}
+}
+
+
+
+// These are autogenerated by a script.
+extern char *help_commands[];
+extern char *help_text[];
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandHelp(const std::vector<std::string> &args)
+// Purpose: Display help on commands
+// Created: 15/2/04
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandHelp(const std::vector<std::string> &args)
+{
+	// help_commands and help_text are parallel, zero-terminated arrays
+	// generated from documentation.txt by makedocumentation.pl.
+	if(args.size() == 0)
+	{
+		// Display a list of all commands
+		printf("Available commands are:\n");
+		for(int c = 0; help_commands[c] != 0; ++c)
+		{
+			printf("    %s\n", help_commands[c]);
+		}
+		printf("Type \"help <command>\" for more information on a command.\n\n");
+	}
+	else
+	{
+		// Display help on a particular command
+		int c;
+		for(c = 0; help_commands[c] != 0; ++c)
+		{
+			if(::strcmp(help_commands[c], args[0].c_str()) == 0)
+			{
+				// Found the command, print help
+				printf("\n%s\n", help_text[c]);
+				break;
+			}
+		}
+		// Loop ran off the end of the table without a match
+		if(help_commands[c] == 0)
+		{
+			printf("No help found for command '%s'\n", args[0].c_str());
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandUsage()
+// Purpose: Display storage space used on server
+// Created: 19/4/04
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandUsage()
+{
+	// Request full details from the server (round trip to the store)
+	std::auto_ptr<BackupProtocolClientAccountUsage> usage(mrConnection.QueryGetAccountUsage());
+
+	// Display each entry in turn.  All sizes are in blocks; each line is
+	// rendered as megabytes plus a percentage of the account's hard limit.
+	int64_t hardLimit = usage->GetBlocksHardLimit();
+	int32_t blockSize = usage->GetBlockSize();
+	CommandUsageDisplayEntry("Used", usage->GetBlocksUsed(), hardLimit, blockSize);
+	CommandUsageDisplayEntry("Old files", usage->GetBlocksInOldFiles(), hardLimit, blockSize);
+	CommandUsageDisplayEntry("Deleted files", usage->GetBlocksInDeletedFiles(), hardLimit, blockSize);
+	CommandUsageDisplayEntry("Directories", usage->GetBlocksInDirectories(), hardLimit, blockSize);
+	CommandUsageDisplayEntry("Soft limit", usage->GetBlocksSoftLimit(), hardLimit, blockSize);
+	CommandUsageDisplayEntry("Hard limit", hardLimit, hardLimit, blockSize);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandUsageDisplayEntry(const char *, int64_t, int64_t, int32_t)
+// Purpose: Display an entry in the usage table
+// Created: 19/4/04
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandUsageDisplayEntry(const char *Name, int64_t Size, int64_t HardLimit, int32_t BlockSize)
+{
+	// Display one line of the usage table: name, size in Mb, percentage of
+	// the hard limit, and a star bar graph scaled to the hard limit.
+
+	// Calculate size in Mb
+	double mb = (((double)Size) * ((double)BlockSize)) / ((double)(1024*1024));
+
+	// Percentage of the hard limit -- guard against a zero (or negative)
+	// hard limit, which would otherwise cause a division by zero below.
+	int64_t percent = (HardLimit > 0)?((Size * 100) / HardLimit):0;
+
+	// Bar graph
+	char bar[41];
+	unsigned int b = 0;
+	if(HardLimit > 0)
+	{
+		b = (unsigned int)((Size * (sizeof(bar)-1)) / HardLimit);
+	}
+	// Clamp in case Size exceeds HardLimit
+	if(b > sizeof(bar)-1) {b = sizeof(bar)-1;}
+	for(unsigned int l = 0; l < b; l++)
+	{
+		bar[l] = '*';
+	}
+	bar[b] = '\0';
+
+	// Print the entry
+	::printf("%14s %10.1fMb %3d%% %s\n", Name, mb, (int32_t)percent, bar);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupQueries::CommandUndelete(const std::vector<std::string> &, const bool *)
+// Purpose: Undelete a directory
+// Created: 23/11/03
+//
+// --------------------------------------------------------------------------
+void BackupQueries::CommandUndelete(const std::vector<std::string> &args, const bool *opts)
+{
+	// Check arguments: exactly one store directory name
+	if(args.size() != 1)
+	{
+		printf("Incorrect usage.\nundelete <directory-name>\n");
+		return;
+	}
+
+	// Get directory ID -- the directory being undeleted is deleted, so
+	// deleted directories must be included in the search.
+	int64_t dirID = FindDirectoryObjectID(args[0], false /* no old versions */, true /* find deleted dirs */);
+
+	// Allowable?
+	if(dirID == 0)
+	{
+		printf("Directory %s not found on server\n", args[0].c_str());
+		return;
+	}
+	if(dirID == BackupProtocolClientListDirectory::RootDirectory)
+	{
+		// The root directory is never deleted, so it cannot be undeleted.
+		printf("Cannot undelete the root directory.\n");
+		return;
+	}
+
+	// Undelete -- the server does the actual work
+	mrConnection.QueryUndeleteDirectory(dirID);
+}
+
+
+
+
+
diff --git a/bin/bbackupquery/BackupQueries.h b/bin/bbackupquery/BackupQueries.h
new file mode 100755
index 00000000..e84de6ab
--- /dev/null
+++ b/bin/bbackupquery/BackupQueries.h
@@ -0,0 +1,101 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupQueries.h
+// Purpose: Perform various queries on the backup store server.
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPQUERIES__H
+#define BACKUPQUERIES__H
+
+#include <vector>
+#include <string>
+
+#include "BoxTime.h"
+
+class BackupProtocolClient;
+class Configuration;
+class ExcludeList;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupQueries
+// Purpose: Perform various queries on the backup store server.
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
+class BackupQueries
+{
+public:
+	BackupQueries(BackupProtocolClient &rConnection, const Configuration &rConfiguration);
+	~BackupQueries();
+private:
+	// No copying
+	BackupQueries(const BackupQueries &);
+public:
+
+	// Parse and execute a single command line
+	void DoCommand(const char *Command);
+
+	// Ready to stop?
+	bool Stop() {return mQuitNow;}
+
+	// Return code?
+	int GetReturnCode() {return mReturnCode;}
+
+private:
+	// Commands -- each implements one bbackupquery command; args are the
+	// space-separated arguments, opts is a flag array indexed by option letter
+	void CommandList(const std::vector<std::string> &args, const bool *opts);
+	void CommandChangeDir(const std::vector<std::string> &args, const bool *opts);
+	void CommandChangeLocalDir(const std::vector<std::string> &args);
+	void CommandGetObject(const std::vector<std::string> &args, const bool *opts);
+	void CommandGet(const std::vector<std::string> &args, const bool *opts);
+	void CommandCompare(const std::vector<std::string> &args, const bool *opts);
+	void CommandRestore(const std::vector<std::string> &args, const bool *opts);
+	void CommandUndelete(const std::vector<std::string> &args, const bool *opts);
+	void CommandUsage();
+	void CommandUsageDisplayEntry(const char *Name, int64_t Size, int64_t HardLimit, int32_t BlockSize);
+	void CommandHelp(const std::vector<std::string> &args);
+
+	// Implementations
+	void List(int64_t DirID, const std::string &rListRoot, const bool *opts, bool FirstLevel);
+	// State and counters shared by the recursive compare operation
+	class CompareParams
+	{
+	public:
+		CompareParams();
+		~CompareParams();
+		void DeleteExcludeLists();
+		bool mQuickCompare;	// compare via block index checksums, no full download
+		bool mIgnoreExcludes;	// don't apply exclusion settings from the configuration
+		int mDifferences;	// number of differences found so far
+		int mDifferencesExplainedByModTime;	// differences on files modified after the last sync
+		int mExcludedDirs;	// directories skipped because they are excluded
+		int mExcludedFiles;	// files skipped because they are excluded
+		const ExcludeList *mpExcludeFiles;	// file exclude list for the location (may be 0)
+		const ExcludeList *mpExcludeDirs;	// directory exclude list for the location (may be 0)
+		box_time_t mLatestFileUploadTime;	// files modified after this may legitimately differ
+	};
+	void CompareLocation(const std::string &rLocation, CompareParams &rParams);
+	void Compare(const std::string &rStoreDir, const std::string &rLocalDir, CompareParams &rParams);
+	void Compare(int64_t DirID, const std::string &rStoreDir, const std::string &rLocalDir, CompareParams &rParams);
+
+	// Utility functions
+	int64_t FindDirectoryObjectID(const std::string &rDirName, bool AllowOldVersion = false,
+		bool AllowDeletedDirs = false, std::vector<std::pair<std::string, int64_t> > *pStack = 0);
+	int64_t GetCurrentDirectoryID();
+	std::string GetCurrentDirectoryName();
+	void SetReturnCode(int code) {mReturnCode = code;}
+
+private:
+	BackupProtocolClient &mrConnection;	// connection to the store server
+	const Configuration &mrConfiguration;	// bbackupd configuration contents
+	bool mQuitNow;		// set when the session should end (see Stop())
+	// Current directory path as (name, object ID) pairs, root-relative
+	std::vector<std::pair<std::string, int64_t> > mDirStack;
+	// NOTE(review): presumably set when running as root / after warning about
+	// owner attributes -- confirm against BackupQueries.cpp
+	bool mRunningAsRoot;
+	bool mWarnedAboutOwnerAttributes;
+	int mReturnCode;	// value reported by GetReturnCode() on exit
+};
+
+#endif // BACKUPQUERIES__H
+
diff --git a/bin/bbackupquery/Makefile.extra b/bin/bbackupquery/Makefile.extra
new file mode 100755
index 00000000..633ec0fc
--- /dev/null
+++ b/bin/bbackupquery/Makefile.extra
@@ -0,0 +1,6 @@
+
+# AUTOGEN SEEDING
+autogen_Documentation.cpp: makedocumentation.pl documentation.txt
+ perl makedocumentation.pl
+
+
diff --git a/bin/bbackupquery/bbackupquery.cpp b/bin/bbackupquery/bbackupquery.cpp
new file mode 100755
index 00000000..aea0faa8
--- /dev/null
+++ b/bin/bbackupquery/bbackupquery.cpp
@@ -0,0 +1,243 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: bbackupquery.cpp
+// Purpose: Backup query utility
+// Created: 2003/10/10
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#ifndef PLATFORM_READLINE_NOT_SUPPORTED
+ #ifdef PLATFORM_LINUX
+ #include "../../local/_linux_readline.h"
+ #else
+ #include <readline/readline.h>
+ #include <readline/history.h>
+ #endif
+#endif
+
+#include "MainHelper.h"
+#include "BoxPortsAndFiles.h"
+#include "BackupDaemonConfigVerify.h"
+#include "SocketStreamTLS.h"
+#include "Socket.h"
+#include "TLSContext.h"
+#include "SSLLib.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreException.h"
+#include "autogen_BackupProtocolClient.h"
+#include "BackupQueries.h"
+#include "FdGetLine.h"
+#include "BackupClientCryptoKeys.h"
+#include "BannerText.h"
+
+#include "MemLeakFindOn.h"
+
+// Print a short usage summary for bbackupquery, then terminate with exit code 1.
+void PrintUsageAndExit()
+{
+	static const char *usage =
+		"Usage: bbackupquery [-q] [-c config_file] [-l log_file] [commands]\n"
+		"As many commands as you require.\n"
+		"If commands are multiple words, remember to enclose the command in quotes.\n"
+		"Remember to use quit command if you don't want to drop into interactive mode.\n";
+	printf("%s", usage);
+	exit(1);
+}
+
+// Entry point: parse options, load the bbackupd configuration, connect and
+// log in to the store server, then run commands from the command line and/or
+// an interactive prompt until quit/EOF.
+int main(int argc, const char *argv[])
+{
+	MAINHELPER_SETUP_MEMORY_LEAK_EXIT_REPORT("bbackupquery.memleaks", "bbackupquery")
+
+	// Really don't want trace statements happening, even in debug mode
+	#ifndef NDEBUG
+	BoxDebugTraceOn = false;
+	#endif
+
+	int returnCode = 0;
+
+	MAINHELPER_START
+
+	FILE *logFile = 0;
+
+	// Filename for configuration file?
+	const char *configFilename = BOX_FILE_BBACKUPD_DEFAULT_CONFIG;
+
+	// Flags
+	bool quiet = false;
+	bool readWrite = false;
+
+	// See if there's another entry on the command line
+	int c;
+	while((c = getopt(argc, (char * const *)argv, "qwc:l:")) != -1)
+	{
+		switch(c)
+		{
+		case 'q':
+			// Quiet mode
+			quiet = true;
+			break;
+
+		case 'w':
+			// Read/write mode
+			readWrite = true;
+			break;
+
+		case 'c':
+			// store argument
+			configFilename = optarg;
+			break;
+
+		case 'l':
+			// open log file -- non-fatal if it can't be opened,
+			// the session just runs without protocol logging
+			logFile = ::fopen(optarg, "w");
+			if(logFile == 0)
+			{
+				printf("Can't open log file '%s'\n", optarg);
+			}
+			break;
+
+		case '?':
+		default:
+			PrintUsageAndExit();
+		}
+	}
+	// Adjust arguments
+	argc -= optind;
+	argv += optind;
+
+	// Print banner?
+	if(!quiet)
+	{
+		// Print via "%s" -- passing the banner as the format string
+		// would misinterpret any '%' it contained as a conversion.
+		const char *banner = BANNER_TEXT("Backup Query Tool");
+		printf("%s", banner);
+	}
+
+	// Read in the configuration file
+	if(!quiet) printf("Using configuration file %s\n", configFilename);
+	std::string errs;
+	std::auto_ptr<Configuration> config(Configuration::LoadAndVerify(configFilename, &BackupDaemonConfigVerify, errs));
+	if(config.get() == 0 || !errs.empty())
+	{
+		printf("Invalid configuration file:\n%s", errs.c_str());
+		return 1;
+	}
+	// Easier coding
+	const Configuration &conf(*config);
+
+	// Setup and connect
+	// 1. TLS context
+	SSLLib::Initialise();
+	// Read in the certificates creating a TLS context
+	TLSContext tlsContext;
+	std::string certFile(conf.GetKeyValue("CertificateFile"));
+	std::string keyFile(conf.GetKeyValue("PrivateKeyFile"));
+	std::string caFile(conf.GetKeyValue("TrustedCAsFile"));
+	tlsContext.Initialise(false /* as client */, certFile.c_str(), keyFile.c_str(), caFile.c_str());
+
+	// Initialise keys
+	BackupClientCryptoKeys_Setup(conf.GetKeyValue("KeysFile").c_str());
+
+	// 2. Connect to server
+	if(!quiet) printf("Connecting to store...\n");
+	SocketStreamTLS socket;
+	socket.Open(tlsContext, Socket::TypeINET, conf.GetKeyValue("StoreHostname").c_str(), BOX_PORT_BBSTORED);
+
+	// 3. Make a protocol, and handshake
+	if(!quiet) printf("Handshake with store...\n");
+	BackupProtocolClient connection(socket);
+	connection.Handshake();
+
+	// logging?
+	if(logFile != 0)
+	{
+		connection.SetLogToFile(logFile);
+	}
+
+	// 4. Log in to server
+	if(!quiet) printf("Login to store...\n");
+	// Check the version of the server -- must match exactly
+	{
+		std::auto_ptr<BackupProtocolClientVersion> serverVersion(connection.QueryVersion(BACKUP_STORE_SERVER_VERSION));
+		if(serverVersion->GetVersion() != BACKUP_STORE_SERVER_VERSION)
+		{
+			THROW_EXCEPTION(BackupStoreException, WrongServerVersion)
+		}
+	}
+	// Login -- if this fails, the Protocol will exception.
+	// Read-only unless -w was given on the command line.
+	connection.QueryLogin(conf.GetKeyValueInt("AccountNumber"),
+		(readWrite)?0:(BackupProtocolClientLogin::Flags_ReadOnly));
+
+	// 5. Tell user.
+	if(!quiet) printf("Login complete.\n\nType \"help\" for a list of commands.\n\n");
+
+	// Set up a context for our work
+	BackupQueries context(connection, conf);
+
+	// Start running commands... first from the command line
+	{
+		int c = 0;
+		while(c < argc && !context.Stop())
+		{
+			context.DoCommand(argv[c++]);
+		}
+	}
+
+	// Get commands from input
+#ifndef PLATFORM_READLINE_NOT_SUPPORTED
+	using_history();
+	char *last_cmd = 0;
+	while(!context.Stop())
+	{
+		char *command = readline("query > ");
+		if(command == NULL)
+		{
+			// Ctrl-D pressed -- terminate now
+			break;
+		}
+		context.DoCommand(command);
+		if(last_cmd != 0 && ::strcmp(last_cmd, command) == 0)
+		{
+			// Consecutive duplicate -- not added to history, free it
+			free(command);
+		}
+		else
+		{
+			// NOTE(review): commands added to the history are never
+			// freed -- a small, bounded leak for an interactive tool.
+			// add_history() is documented to copy the string, so the
+			// previous last_cmd could be freed here; confirm against
+			// the readline version in use before changing.
+			add_history(command);
+			last_cmd = command;
+		}
+	}
+#else
+	// Version for platforms which don't have readline by default
+	FdGetLine getLine(fileno(stdin));
+	while(!context.Stop())
+	{
+		printf("query > ");
+		fflush(stdout);
+		std::string command(getLine.GetLine());
+		context.DoCommand(command.c_str());
+	}
+#endif
+
+	// Done... stop nicely
+	if(!quiet) printf("Logging off...\n");
+	connection.QueryFinished();
+	if(!quiet) printf("Session finished.\n");
+
+	// Return code
+	returnCode = context.GetReturnCode();
+
+	// Close log file?
+	if(logFile)
+	{
+		::fclose(logFile);
+	}
+
+	// Let everything be cleaned up on exit.
+
+	MAINHELPER_END
+
+	exit(returnCode);
+	return returnCode;
+}
+
diff --git a/bin/bbackupquery/documentation.txt b/bin/bbackupquery/documentation.txt
new file mode 100755
index 00000000..429caabe
--- /dev/null
+++ b/bin/bbackupquery/documentation.txt
@@ -0,0 +1,165 @@
+
+bbackupquery utility -- examine store, compare files, restore, etc.
+
+This file has markers for automatic help generation script -- '>' marks a start of a command/help topic,
+and '<' marks the end of a section.
+
+Command line:
+=============
+
+> bbackupquery [-q] [-c configfile] [commands ...]
+
+ -q -- quiet, no information prompts
+	-c -- specify another bbackupd configuration file
+
+The commands following the options are executed, then (if there was no quit
+command) an interactive mode is entered.
+
+If a command contains a space, enclose it in quotes. Example
+
+ bbackupquery "list testdir1" quit
+
+to list the contents of testdir1, and then exit without interactive mode.
+<
+
+Commands:
+=========
+
+All directory names are relative to a "current" directory, or to the root if
+they start with '/'. The initial directory is always the root directory.
+
+
+> list [options] [directory-name]
+
+ List contents of current directory, or specified directory.
+
+ -r -- recursively list all files
+ -d -- list deleted files/directories
+ -o -- list old versions of files/directories
+ -I -- don't display object ID
+ -F -- don't display flags
+ -t -- show file modification time
+		(and attribute modification time if the object has attributes, ~ separated)
+ -s -- show file size in blocks used on server
+ (only very approximate indication of size locally)
+
+ls can be used as an alias.
+<
+
+> ls
+
+ Alias for 'list'. Type 'help list' for options.
+<
+
+> cd [options] <directory-name>
+
+ Change directory
+
+ -d -- consider deleted directories for traversal
+ -o -- consider old versions of directories for traversal
+ (this option should never be useful in a correctly formed store)
+<
+
+> pwd
+
+ Print current directory, always root relative.
+<
+
+> lcd <local-directory-name>
+
+ Change local directory.
+
+ Type "sh ls" to list the contents.
+<
+
+> sh <shell command>
+
+ All of the parameters after the "sh" are run as a shell command.
+
+ For example, to list the contents of the location directory, type "sh ls"
+<
+
+> get <object-filename> [<local-filename>]
+get -i <object-id> <local-filename>
+
+ Gets a file from the store. Object is specified as the filename within
+ the current directory, and local filename is optional. Ignores old and
+ deleted files when searching the directory for the file to retrieve.
+
+ To get an old or deleted file, use the -i option and select the object
+ as a hex object ID (first column in listing). The local filename must
+ be specified.
+<
+
+> compare -a
+compare -l <location-name>
+compare <store-dir-name> <local-dir-name>
+
+ Compares the (current) data on the store with the data on the disc.
+ All the data will be downloaded -- this is potentially a very long
+ operation.
+
+ -a -- compare all locations
+ -l -- compare one backup location as specified in the configuration file.
+ -c -- set return code
+ -q -- quick compare. Only checks file contents against checksums,
+ doesn't do a full download
+ -E -- ignore exclusion settings
+
+ Comparing with the root directory is an error, use -a option instead.
+
+ If -c is set, then the return code (if quit is the next command) will be
+ 1 Comparison was exact
+ 2 Differences were found
+	3	An error occurred
+ This can be used for automated tests.
+<
+
+> restore [-d] [-r] [-i] <directory-name> <local-directory-name>
+
+ Restores a directory to the local disc. The local directory specified
+ must not exist (unless a previous restore is being restarted).
+
+ The root cannot be restored -- restore locations individually.
+
+ -d -- restore a deleted directory.
+ -r -- resume an interrupted restoration
+ -i -- directory name is actually an ID
+
+ If a restore operation is interrupted for any reason, it can be restarted
+ using the -r switch. Restore progress information is saved in a file at
+ regular intervals during the restore operation to allow restarts.
+<
+
+> getobject <object-id> <local-filename>
+
+ Gets the object specified by the object id (in hex) and stores the raw
+ contents in the local file specified.
+
+ This is only useful for debugging as it does not decode files from the
+ stored format, which is encrypted and compressed.
+<
+
+> usage
+
+ Show space used on the server for this account.
+
+ Used: Total amount of space used on the server.
+ Old files: Space used by old files
+ Deleted files: Space used by deleted files
+ Directories: Space used by the directory structure.
+
+ When Used exceeds the soft limit, the server will start to remove old and
+ deleted files until the usage drops below the soft limit.
+
+ After a while, you would expect to see the usage stay at just below the
+ soft limit. You only need more space if the space used by old and deleted
+ files is near zero.
+<
+
+> quit
+
+ End session and exit.
+<
+
+
diff --git a/bin/bbackupquery/makedocumentation.pl b/bin/bbackupquery/makedocumentation.pl
new file mode 100755
index 00000000..a3632848
--- /dev/null
+++ b/bin/bbackupquery/makedocumentation.pl
@@ -0,0 +1,75 @@
+#!/usr/bin/perl
+use strict;
+
+# Generates autogen_Documentation.cpp (bbackupquery's built-in help) from
+# documentation.txt.  In that file, a line beginning '>' starts a help section
+# (the first word after '>' is the command name) and a line beginning '<'
+# ends it.  The output is a pair of parallel, zero-terminated C arrays:
+# help_commands[] and help_text[].
+
+print "Creating built-in documentation for bbackupquery...\n";
+
+open DOC,"documentation.txt" or die "Can't open documentation.txt file";
+my $section;	# name of the section being collected ('' or undef = none)
+my %help;	# command name => accumulated help text
+my @in_order;	# command names in file order, for stable output
+
+while(<DOC>)
+{
+	if(m/\A>\s+(\w+)/)
+	{
+		# Start of a section: key it by the first word, but seed the
+		# help text with the complete usage line following the '>'.
+		$section = $1;
+		m/\A>\s+(.+)\Z/;
+		$help{$section} = $1."\n";
+		push @in_order,$section;
+	}
+	elsif(m/\A</)
+	{
+		# End of the current section
+		$section = '';
+	}
+	elsif($section ne '')
+	{
+		# Body line -- accumulate into the current section
+		$help{$section} .= $_;
+	}
+}
+
+close DOC;
+
+open OUT,">autogen_Documentation.cpp" or die "Can't open output file for writing";
+
+# File header and the zero-terminated array of command names
+print OUT <<__E;
+//
+// Automatically generated file, do not edit.
+//
+
+#include "Box.h"
+
+#include "MemLeakFindOn.h"
+
+char *help_commands[] =
+{
+__E
+
+for(@in_order)
+{
+	print OUT qq:\t"$_",\n:;
+}
+
+print OUT <<__E;
+	0
+};
+
+char *help_text[] =
+{
+__E
+
+# Escape each help text so it can be emitted as a single C string literal.
+# NOTE(review): backslashes in the help text are not escaped, so
+# documentation.txt must avoid literal backslashes.
+for(@in_order)
+{
+	my $t = $help{$_};
+	$t =~ s/\t/    /g;
+	$t =~ s/\n/\\n/g;
+	$t =~ s/"/\\"/g;
+	print OUT qq:\t"$t",\n:;
+}
+
+print OUT <<__E;
+	0
+};
+
+__E
+
+close OUT;
diff --git a/bin/bbstoreaccounts/bbstoreaccounts.cpp b/bin/bbstoreaccounts/bbstoreaccounts.cpp
new file mode 100755
index 00000000..89edd0b2
--- /dev/null
+++ b/bin/bbstoreaccounts/bbstoreaccounts.cpp
@@ -0,0 +1,548 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: bbstoreaccounts
+// Purpose: backup store administration tool
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <limits.h>
+#include <vector>
+#include <algorithm>
+
+#include "BoxPortsAndFiles.h"
+#include "BackupStoreConfigVerify.h"
+#include "RaidFileController.h"
+#include "BackupStoreAccounts.h"
+#include "BackupStoreAccountDatabase.h"
+#include "MainHelper.h"
+#include "BackupStoreInfo.h"
+#include "StoreStructure.h"
+#include "NamedLock.h"
+#include "UnixUser.h"
+#include "BackupStoreCheck.h"
+
+#include "MemLeakFindOn.h"
+
+// max size of soft limit as percent of hard limit
+#define MAX_SOFT_LIMIT_SIZE 97
+
+void CheckSoftHardLimits(int64_t SoftLimit, int64_t HardLimit)
+{
+ if(SoftLimit >= HardLimit)
+ {
+ printf("ERROR: Soft limit must be less than the hard limit.\n");
+ exit(1);
+ }
+ if(SoftLimit > ((HardLimit * MAX_SOFT_LIMIT_SIZE) / 100))
+ {
+ printf("ERROR: Soft limit must be no more than %d%% of the hard limit.\n", MAX_SOFT_LIMIT_SIZE);
+ exit(1);
+ }
+}
+
+int BlockSizeOfDiscSet(int DiscSet)
+{
+ // Get controller, check disc set number
+ RaidFileController &controller(RaidFileController::GetController());
+ if(DiscSet < 0 || DiscSet >= controller.GetNumDiscSets())
+ {
+ printf("Disc set %d does not exist\n", DiscSet);
+ exit(1);
+ }
+
+ // Return block size
+ return controller.GetDiscSet(DiscSet).GetBlockSize();
+}
+
+const char *BlockSizeToString(int64_t Blocks, int DiscSet)
+{
+ // Not reentrant, nor can be used in the same function call twice, etc.
+ static char string[256];
+
+ // Work out size in Mb.
+ double mb = (Blocks * BlockSizeOfDiscSet(DiscSet)) / (1024.0*1024.0);
+
+ // Format string
+ sprintf(string, "%lld (%.2fMb)", Blocks, mb);
+
+ return string;
+}
+
+int64_t SizeStringToBlocks(const char *string, int DiscSet)
+{
+ // Find block size
+ int blockSize = BlockSizeOfDiscSet(DiscSet);
+
+ // Get number
+ char *endptr = (char*)string;
+ int64_t number = strtol(string, &endptr, 0);
+ if(endptr == string || number == LONG_MIN || number == LONG_MAX)
+ {
+ printf("%s is an invalid number\n", string);
+ exit(1);
+ }
+
+ // Check units
+ switch(*endptr)
+ {
+ case 'M':
+ case 'm':
+ // Units: Mb
+ return (number * 1024*1024) / blockSize;
+ break;
+
+ case 'G':
+ case 'g':
+ // Units: Gb
+ return (number * 1024*1024*1024) / blockSize;
+ break;
+
+ case 'B':
+ case 'b':
+ // Units: Blocks
+ // Easy! Just return the number specified.
+ return number;
+ break;
+
+ default:
+ printf("%s has an invalid units specifier\nUse B for blocks, M for Mb, G for Gb, eg 2Gb\n", string);
+ exit(1);
+ break;
+ }
+}
+
+bool GetWriteLockOnAccount(NamedLock &rLock, const std::string rRootDir, int DiscSetNum)
+{
+ std::string writeLockFilename;
+ StoreStructure::MakeWriteLockFilename(rRootDir, DiscSetNum, writeLockFilename);
+
+ bool gotLock = false;
+ int triesLeft = 8;
+ do
+ {
+ gotLock = rLock.TryAndGetLock(writeLockFilename.c_str(), 0600 /* restrictive file permissions */);
+
+ if(!gotLock)
+ {
+ --triesLeft;
+ ::sleep(1);
+ }
+ } while(!gotLock && triesLeft > 0);
+
+ if(!gotLock)
+ {
+ // Couldn't lock the account -- just stop now
+ printf("Couldn't lock the account -- did not change the limits\nTry again later.\n");
+ return 1;
+ }
+
+ return gotLock;
+}
+
+int SetLimit(Configuration &rConfig, const std::string &rUsername, int32_t ID, const char *SoftLimitStr, const char *HardLimitStr)
+{
+ // Become the user specified in the config file?
+ std::auto_ptr<UnixUser> user;
+ if(!rUsername.empty())
+ {
+ // Username specified, change...
+ user.reset(new UnixUser(rUsername.c_str()));
+ user->ChangeProcessUser(true /* temporary */);
+ // Change will be undone at the end of this function
+ }
+
+ // Load in the account database
+ std::auto_ptr<BackupStoreAccountDatabase> db(BackupStoreAccountDatabase::Read(rConfig.GetKeyValue("AccountDatabase").c_str()));
+
+ // Already exists?
+ if(!db->EntryExists(ID))
+ {
+ printf("Account %x does not exist\n", ID);
+ return 1;
+ }
+
+ // Load it in
+ BackupStoreAccounts acc(*db);
+ std::string rootDir;
+ int discSet;
+ acc.GetAccountRoot(ID, rootDir, discSet);
+
+ // Attempt to lock
+ NamedLock writeLock;
+ if(!GetWriteLockOnAccount(writeLock, rootDir, discSet))
+ {
+ // Failed to get lock
+ return 1;
+ }
+
+ // Load the info
+ std::auto_ptr<BackupStoreInfo> info(BackupStoreInfo::Load(ID, rootDir, discSet, false /* Read/Write */));
+
+ // Change the limits
+ int64_t softlimit = SizeStringToBlocks(SoftLimitStr, discSet);
+ int64_t hardlimit = SizeStringToBlocks(HardLimitStr, discSet);
+ CheckSoftHardLimits(softlimit, hardlimit);
+ info->ChangeLimits(softlimit, hardlimit);
+
+ // Save
+ info->Save();
+
+ printf("Limits on account 0x%08x changed to %lld soft, %lld hard\n", ID, softlimit, hardlimit);
+
+ return 0;
+}
+
+int AccountInfo(Configuration &rConfig, int32_t ID)
+{
+ // Load in the account database
+ std::auto_ptr<BackupStoreAccountDatabase> db(BackupStoreAccountDatabase::Read(rConfig.GetKeyValue("AccountDatabase").c_str()));
+
+ // Exists?
+ if(!db->EntryExists(ID))
+ {
+ printf("Account %x does not exist\n", ID);
+ return 1;
+ }
+
+ // Load it in
+ BackupStoreAccounts acc(*db);
+ std::string rootDir;
+ int discSet;
+ acc.GetAccountRoot(ID, rootDir, discSet);
+ std::auto_ptr<BackupStoreInfo> info(BackupStoreInfo::Load(ID, rootDir, discSet, true /* ReadOnly */));
+
+ // Then print out lots of info
+ printf(" Account ID: %08x\n", ID);
+ printf(" Last object ID: %lld\n", info->GetLastObjectIDUsed());
+ printf(" Blocks used: %s\n", BlockSizeToString(info->GetBlocksUsed(), discSet));
+ printf(" Blocks used by old files: %s\n", BlockSizeToString(info->GetBlocksInOldFiles(), discSet));
+ printf("Blocks used by deleted files: %s\n", BlockSizeToString(info->GetBlocksInDeletedFiles(), discSet));
+ printf(" Blocks used by directories: %s\n", BlockSizeToString(info->GetBlocksInDirectories(), discSet));
+ printf(" Block soft limit: %s\n", BlockSizeToString(info->GetBlocksSoftLimit(), discSet));
+ printf(" Block hard limit: %s\n", BlockSizeToString(info->GetBlocksHardLimit(), discSet));
+ printf(" Client store marker: %lld\n", info->GetClientStoreMarker());
+
+ return 0;
+}
+
+int DeleteAccount(Configuration &rConfig, const std::string &rUsername, int32_t ID, bool AskForConfirmation)
+{
+ // Check user really wants to do this
+ if(AskForConfirmation)
+ {
+ ::printf("Really delete account %08x?\n(type 'yes' to confirm)\n", ID);
+ char response[256];
+ if(::fgets(response, sizeof(response), stdin) == 0 || ::strcmp(response, "yes\n") != 0)
+ {
+ printf("Deletion cancelled\n");
+ return 0;
+ }
+ }
+
+ // Load in the account database
+ std::auto_ptr<BackupStoreAccountDatabase> db(BackupStoreAccountDatabase::Read(rConfig.GetKeyValue("AccountDatabase").c_str()));
+
+ // Exists?
+ if(!db->EntryExists(ID))
+ {
+ printf("Account %x does not exist\n", ID);
+ return 1;
+ }
+
+ // Get info from the database
+ BackupStoreAccounts acc(*db);
+ std::string rootDir;
+ int discSetNum;
+ acc.GetAccountRoot(ID, rootDir, discSetNum);
+
+ // Obtain a write lock, as the daemon user
+ NamedLock writeLock;
+ {
+ // Bbecome the user specified in the config file
+ std::auto_ptr<UnixUser> user;
+ if(!rUsername.empty())
+ {
+ // Username specified, change...
+ user.reset(new UnixUser(rUsername.c_str()));
+ user->ChangeProcessUser(true /* temporary */);
+ // Change will be undone at the end of this function
+ }
+
+ // Get a write lock
+ if(!GetWriteLockOnAccount(writeLock, rootDir, discSetNum))
+ {
+ // Failed to get lock
+ return 1;
+ }
+
+ // Back to original user, but write is maintained
+ }
+
+ // Delete from account database
+ db->DeleteEntry(ID);
+
+ // Write back to disc
+ db->Write();
+
+ // Remove the store files...
+
+ // First, become the user specified in the config file
+ std::auto_ptr<UnixUser> user;
+ if(!rUsername.empty())
+ {
+ // Username specified, change...
+ user.reset(new UnixUser(rUsername.c_str()));
+ user->ChangeProcessUser(true /* temporary */);
+ // Change will be undone at the end of this function
+ }
+
+ // Secondly, work out which directories need wiping
+ std::vector<std::string> toDelete;
+ RaidFileController &rcontroller(RaidFileController::GetController());
+ RaidFileDiscSet discSet(rcontroller.GetDiscSet(discSetNum));
+ for(RaidFileDiscSet::const_iterator i(discSet.begin()); i != discSet.end(); ++i)
+ {
+ if(std::find(toDelete.begin(), toDelete.end(), *i) == toDelete.end())
+ {
+ toDelete.push_back((*i) + DIRECTORY_SEPARATOR + rootDir);
+ }
+ }
+
+ // Thirdly, delete the directories...
+ for(std::vector<std::string>::const_iterator d(toDelete.begin()); d != toDelete.end(); ++d)
+ {
+ ::printf("Deleting store directory %s...\n", (*d).c_str());
+ // Just use the rm command to delete the files
+ std::string cmd("rm -rf ");
+ cmd += *d;
+ // Run command
+ if(::system(cmd.c_str()) != 0)
+ {
+ ::printf("ERROR: Deletion of %s failed.\n(when cleaning up, remember to delete all raid directories)\n", (*d).c_str());
+ return 1;
+ }
+ }
+
+ // Success!
+ return 0;
+}
+
+int CheckAccount(Configuration &rConfig, const std::string &rUsername, int32_t ID, bool FixErrors, bool Quiet)
+{
+ // Load in the account database
+ std::auto_ptr<BackupStoreAccountDatabase> db(BackupStoreAccountDatabase::Read(rConfig.GetKeyValue("AccountDatabase").c_str()));
+
+ // Exists?
+ if(!db->EntryExists(ID))
+ {
+ printf("Account %x does not exist\n", ID);
+ return 1;
+ }
+
+ // Get info from the database
+ BackupStoreAccounts acc(*db);
+ std::string rootDir;
+ int discSetNum;
+ acc.GetAccountRoot(ID, rootDir, discSetNum);
+
+ // Become the right user
+ std::auto_ptr<UnixUser> user;
+ if(!rUsername.empty())
+ {
+ // Username specified, change...
+ user.reset(new UnixUser(rUsername.c_str()));
+ user->ChangeProcessUser(true /* temporary */);
+ // Change will be undone at the end of this function
+ }
+
+ // Check it
+ BackupStoreCheck check(rootDir, discSetNum, ID, FixErrors, Quiet);
+ check.Check();
+
+ return check.ErrorsFound()?1:0;
+}
+
+int CreateAccount(Configuration &rConfig, const std::string &rUsername, int32_t ID, int32_t DiscNumber, int32_t SoftLimit, int32_t HardLimit)
+{
+ // Load in the account database
+ std::auto_ptr<BackupStoreAccountDatabase> db(BackupStoreAccountDatabase::Read(rConfig.GetKeyValue("AccountDatabase").c_str()));
+
+ // Already exists?
+ if(db->EntryExists(ID))
+ {
+ printf("Account %x already exists\n", ID);
+ return 1;
+ }
+
+ // Create it.
+ BackupStoreAccounts acc(*db);
+ acc.Create(ID, DiscNumber, SoftLimit, HardLimit, rUsername);
+
+ printf("Account %x created\n", ID);
+
+ return 0;
+}
+
+void PrintUsageAndExit()
+{
+ printf("Usage: bbstoreaccounts [-c config_file] action account_id [args]\nAccount ID is integer specified in hex\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ MAINHELPER_SETUP_MEMORY_LEAK_EXIT_REPORT("bbstoreaccounts.memleaks", "bbstoreaccounts")
+
+ MAINHELPER_START
+
+ // Filename for configuraiton file?
+ const char *configFilename = BOX_FILE_BBSTORED_DEFAULT_CONFIG;
+
+ // See if there's another entry on the command line
+ int c;
+ while((c = getopt(argc, (char * const *)argv, "c:")) != -1)
+ {
+ switch(c)
+ {
+ case 'c':
+ // store argument
+ configFilename = optarg;
+ break;
+
+ case '?':
+ default:
+ PrintUsageAndExit();
+ }
+ }
+ // Adjust arguments
+ argc -= optind;
+ argv += optind;
+
+ // Read in the configuration file
+ std::string errs;
+ std::auto_ptr<Configuration> config(Configuration::LoadAndVerify(configFilename, &BackupConfigFileVerify, errs));
+ if(config.get() == 0 || !errs.empty())
+ {
+ printf("Invalid configuration file:\n%s", errs.c_str());
+ }
+
+ // Get the user under which the daemon runs
+ std::string username;
+ {
+ const Configuration &rserverConfig(config->GetSubConfiguration("Server"));
+ if(rserverConfig.KeyExists("User"))
+ {
+ username = rserverConfig.GetKeyValue("User");
+ }
+ }
+
+ // Initialise the raid file controller
+ RaidFileController &rcontroller(RaidFileController::GetController());
+ rcontroller.Initialise(config->GetKeyValue("RaidFileConf").c_str());
+
+ // Then... check we have two arguments
+ if(argc < 2)
+ {
+ PrintUsageAndExit();
+ }
+
+ // Get the id
+ int32_t id;
+ if(::sscanf(argv[1], "%x", &id) != 1)
+ {
+ PrintUsageAndExit();
+ }
+
+ // Now do the command.
+ if(::strcmp(argv[0], "create") == 0)
+ {
+ // which disc?
+ int32_t discnum;
+ int32_t softlimit;
+ int32_t hardlimit;
+ if(argc < 5
+ || ::sscanf(argv[2], "%d", &discnum) != 1)
+ {
+ printf("create requires raid file disc number, soft and hard limits\n");
+ return 1;
+ }
+
+ // Decode limits
+ softlimit = SizeStringToBlocks(argv[3], discnum);
+ hardlimit = SizeStringToBlocks(argv[4], discnum);
+ CheckSoftHardLimits(softlimit, hardlimit);
+
+ // Create the account...
+ return CreateAccount(*config, username, id, discnum, softlimit, hardlimit);
+ }
+ else if(::strcmp(argv[0], "info") == 0)
+ {
+ // Print information on this account
+ return AccountInfo(*config, id);
+ }
+ else if(::strcmp(argv[0], "setlimit") == 0)
+ {
+ // Change the limits on this account
+ if(argc < 4)
+ {
+ printf("setlimit requires soft and hard limits\n");
+ return 1;
+ }
+
+ return SetLimit(*config, username, id, argv[2], argv[3]);
+ }
+ else if(::strcmp(argv[0], "delete") == 0)
+ {
+ // Delete an account
+ bool askForConfirmation = true;
+ if(argc >= 3 && (::strcmp(argv[2], "yes") == 0))
+ {
+ askForConfirmation = false;
+ }
+ return DeleteAccount(*config, username, id, askForConfirmation);
+ }
+ else if(::strcmp(argv[0], "check") == 0)
+ {
+ bool fixErrors = false;
+ bool quiet = false;
+
+ // Look at other options
+ for(int o = 2; o < argc; ++o)
+ {
+ if(::strcmp(argv[o], "fix") == 0)
+ {
+ fixErrors = true;
+ }
+ else if(::strcmp(argv[o], "quiet") == 0)
+ {
+ quiet = true;
+ }
+ else
+ {
+ ::printf("Unknown option %s.\n", argv[o]);
+ return 2;
+ }
+ }
+
+ // Check the account
+ return CheckAccount(*config, username, id, fixErrors, quiet);
+ }
+ else
+ {
+ printf("Unknown command '%s'\n", argv[0]);
+ return 1;
+ }
+
+ return 0;
+
+ MAINHELPER_END
+}
+
+
diff --git a/bin/bbstored/BBStoreDHousekeeping.cpp b/bin/bbstored/BBStoreDHousekeeping.cpp
new file mode 100755
index 00000000..2eb19089
--- /dev/null
+++ b/bin/bbstored/BBStoreDHousekeeping.cpp
@@ -0,0 +1,175 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BBStoreDHousekeeping.cpp
+// Purpose: Implementation of housekeeping functions for bbstored
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <stdio.h>
+#include <syslog.h>
+
+#include "BackupStoreDaemon.h"
+#include "BackupStoreAccountDatabase.h"
+#include "BackupStoreAccounts.h"
+#include "HousekeepStoreAccount.h"
+#include "BoxTime.h"
+#include "Configuration.h"
+
+#include "MemLeakFindOn.h"
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::HousekeepingProcess()
+// Purpose: Do housekeeping
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+void BackupStoreDaemon::HousekeepingProcess()
+{
+ // Get the time between housekeeping runs
+ const Configuration &rconfig(GetConfiguration());
+ int64_t housekeepingInterval = SecondsToBoxTime((uint32_t)rconfig.GetKeyValueInt("TimeBetweenHousekeeping"));
+
+ int64_t lastHousekeepingRun = 0;
+
+ while(!StopRun())
+ {
+ // Time now
+ int64_t timeNow = GetCurrentBoxTime();
+ // Do housekeeping if the time interval has elapsed since the last check
+ if((timeNow - lastHousekeepingRun) >= housekeepingInterval)
+ {
+ // Store the time
+ lastHousekeepingRun = timeNow;
+ ::syslog(LOG_INFO, "Starting housekeeping");
+
+ // Get the list of accounts
+ std::vector<int32_t> accounts;
+ if(mpAccountDatabase)
+ {
+ mpAccountDatabase->GetAllAccountIDs(accounts);
+ }
+
+ SetProcessTitle("housekeeping, active");
+
+ // Check them all
+ for(std::vector<int32_t>::const_iterator i = accounts.begin(); i != accounts.end(); ++i)
+ {
+ try
+ {
+ if(mpAccounts)
+ {
+ // Get the account root
+ std::string rootDir;
+ int discSet = 0;
+ mpAccounts->GetAccountRoot(*i, rootDir, discSet);
+
+ // Do housekeeping on this account
+ HousekeepStoreAccount housekeeping(*i, rootDir, discSet, *this);
+ housekeeping.DoHousekeeping();
+ }
+ }
+ catch(BoxException &e)
+ {
+ ::syslog(LOG_ERR, "while housekeeping account %08X, exception %s (%d/%d) -- aborting housekeeping run for this account",
+ *i, e.what(), e.GetType(), e.GetSubType());
+ }
+ catch(std::exception &e)
+ {
+ ::syslog(LOG_ERR, "while housekeeping account %08X, exception %s -- aborting housekeeping run for this account",
+ *i, e.what());
+ }
+ catch(...)
+ {
+ ::syslog(LOG_ERR, "while housekeeping account %08X, unknown exception -- aborting housekeeping run for this account",
+ *i);
+ }
+
+ // Check to see if there's any message pending
+ CheckForInterProcessMsg(0 /* no account */);
+
+ // Stop early?
+ if(StopRun())
+ {
+ break;
+ }
+ }
+
+ ::syslog(LOG_INFO, "Finished housekeeping");
+ }
+
+ // Placed here for accuracy, if StopRun() is true, for example.
+ SetProcessTitle("housekeeping, idle");
+
+ // Calculate how long should wait before doing the next housekeeping run
+ timeNow = GetCurrentBoxTime();
+ int64_t secondsToGo = BoxTimeToSeconds((lastHousekeepingRun + housekeepingInterval) - timeNow);
+ if(secondsToGo < 1) secondsToGo = 1;
+ if(secondsToGo > 60) secondsToGo = 60;
+ int32_t millisecondsToGo = ((int)secondsToGo) * 1000;
+
+ // Check to see if there's any message pending
+ CheckForInterProcessMsg(0 /* no account */, millisecondsToGo);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::CheckForInterProcessMsg(int, int)
+// Purpose: Process a message, returning true if the housekeeping process
+// should abort for the specified account.
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+bool BackupStoreDaemon::CheckForInterProcessMsg(int AccountNum, int MaximumWaitTime)
+{
+ // First, check to see if it's EOF -- this means something has gone wrong, and the housekeeping should terminate.
+ if(mInterProcessComms.IsEOF())
+ {
+ SetTerminateWanted();
+ return true;
+ }
+
+ // Get a line, and process the message
+ std::string line;
+ if(mInterProcessComms.GetLine(line, false /* no pre-processing */, MaximumWaitTime))
+ {
+ TRACE1("housekeeping received command '%s' over interprocess comms\n", line.c_str());
+
+ int account = 0;
+
+ if(line == "h")
+ {
+ // HUP signal received by main process
+ SetReloadConfigWanted();
+ return true;
+ }
+ else if(line == "t")
+ {
+ // Terminate signal received by main process
+ SetTerminateWanted();
+ return true;
+ }
+ else if(sscanf(line.c_str(), "r%x", &account) == 1)
+ {
+ // Main process is trying to lock an account -- are we processing it?
+ if(account == AccountNum)
+ {
+ // Yes! -- need to stop now so when it retries to get the lock, it will succeed
+ ::syslog(LOG_INFO, "Housekeeping giving way to connection for account 0x%08x", AccountNum);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+
diff --git a/bin/bbstored/BackupCommands.cpp b/bin/bbstored/BackupCommands.cpp
new file mode 100755
index 00000000..1a021a58
--- /dev/null
+++ b/bin/bbstored/BackupCommands.cpp
@@ -0,0 +1,861 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupCommands.cpp
+// Purpose: Implement commands for the Backup store protocol
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <syslog.h>
+
+#include "autogen_BackupProtocolServer.h"
+#include "BackupConstants.h"
+#include "BackupContext.h"
+#include "CollectInBufferStream.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreException.h"
+#include "BackupStoreFile.h"
+#include "StreamableMemBlock.h"
+#include "BackupStoreConstants.h"
+#include "RaidFileController.h"
+#include "BackupStoreInfo.h"
+#include "RaidFileController.h"
+#include "FileStream.h"
+
+#include "MemLeakFindOn.h"
+
+#define CHECK_PHASE(phase) \
+ if(rContext.GetPhase() != BackupContext::phase) \
+ { \
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError( \
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_NotInRightProtocolPhase)); \
+ }
+
+#define CHECK_WRITEABLE_SESSION \
+ if(rContext.SessionIsReadOnly()) \
+ { \
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError( \
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_SessionReadOnly)); \
+ }
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerVersion::DoCommand(Protocol &, BackupContext &)
+// Purpose: Return the current version, or an error if the requested version isn't allowed
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerVersion::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Version)
+
+ // Correct version?
+ if(mVersion != BACKUP_STORE_SERVER_VERSION)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_WrongVersion));
+ }
+
+ // Mark the next phase
+ rContext.SetPhase(BackupContext::Phase_Login);
+
+ // Return our version
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerVersion(BACKUP_STORE_SERVER_VERSION));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerLogin::DoCommand(Protocol &, BackupContext &)
+// Purpose: Return the current version, or an error if the requested version isn't allowed
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerLogin::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Login)
+
+ // Check given client ID against the ID in the certificate certificate
+ // and that the client actually has an account on this machine
+ if(mClientID != rContext.GetClientID() || !rContext.GetClientHasAccount())
+ {
+ ::syslog(LOG_INFO, "Failed login: Client ID presented was %08X", mClientID);
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_BadLogin));
+ }
+
+ // If we need to write, check that nothing else has got a write lock
+ if((mFlags & Flags_ReadOnly) != Flags_ReadOnly)
+ {
+ // See if the context will get the lock
+ if(!rContext.AttemptToGetWriteLock())
+ {
+ ::syslog(LOG_INFO, "Failed to get write lock (for Client ID %08X)", mClientID);
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_CannotLockStoreForWriting));
+ }
+
+ // Debug: check we got the lock
+ ASSERT(!rContext.SessionIsReadOnly());
+ }
+
+ // Load the store info
+ rContext.LoadStoreInfo();
+
+ // Get the last client store marker
+ int64_t clientStoreMarker = rContext.GetClientStoreMarker();
+
+ // Mark the next phase
+ rContext.SetPhase(BackupContext::Phase_Commands);
+
+ // Log login
+ ::syslog(LOG_INFO, "Login: Client ID %08X, %s", mClientID, ((mFlags & Flags_ReadOnly) != Flags_ReadOnly)?"Read/Write":"Read-only");
+
+ // Get the usage info for reporting to the client
+ int64_t blocksUsed = 0, blocksSoftLimit = 0, blocksHardLimit = 0;
+ rContext.GetStoreDiscUsageInfo(blocksUsed, blocksSoftLimit, blocksHardLimit);
+
+ // Return success
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerLoginConfirmed(clientStoreMarker, blocksUsed, blocksSoftLimit, blocksHardLimit));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerFinished::DoCommand(Protocol &, BackupContext &)
+// Purpose: Marks end of conversation (Protocol framework handles this)
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerFinished::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ ::syslog(LOG_INFO, "Session finished");
+
+ // Let the context know about it
+ rContext.ReceivedFinishCommand();
+
+ // can be called in any phase
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerFinished);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerListDirectory::DoCommand(Protocol &, BackupContext &)
+// Purpose: Command to list a directory
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerListDirectory::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Ask the context for a directory
+ const BackupStoreDirectory &rdir(rContext.GetDirectory(mObjectID));
+
+ // Store the listing to a stream
+ std::auto_ptr<CollectInBufferStream> stream(new CollectInBufferStream);
+ rdir.WriteToStream(*stream, mFlagsMustBeSet, mFlagsNotToBeSet, mSendAttributes,
+ false /* never send dependency info to the client */);
+ stream->SetForReading();
+
+ // Get the protocol to send the stream
+ rProtocol.SendStreamAfterCommand(stream.release());
+
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerStoreFile::DoCommand(Protocol &, BackupContext &)
+// Purpose: Command to store a file on the server
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerStoreFile::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Check that the diff from file actually exists, if it's specified
+ if(mDiffFromFileID != 0)
+ {
+ if(!rContext.ObjectExists(mDiffFromFileID, BackupContext::ObjectExists_File))
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_DiffFromFileDoesNotExist));
+ }
+ }
+
+ // A stream follows, which contains the file
+ std::auto_ptr<IOStream> dirstream(rProtocol.ReceiveStream());
+
+ // Ask the context to store it
+ int64_t id = 0;
+ try
+ {
+ id = rContext.AddFile(*dirstream, mDirectoryObjectID, mModificationTime, mAttributesHash, mDiffFromFileID,
+ mFilename, true /* mark files with same name as old versions */);
+ }
+ catch(BackupStoreException &e)
+ {
+ if(e.GetSubType() == BackupStoreException::AddedFileDoesNotVerify)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_FileDoesNotVerify));
+ }
+ else if(e.GetSubType() == BackupStoreException::AddedFileExceedsStorageLimit)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_StorageLimitExceeded));
+ }
+ else
+ {
+ throw;
+ }
+ }
+
+ // Tell the caller what the file was
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(id));
+}
+
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerGetObject::DoCommand(Protocol &, BackupContext &)
+// Purpose: Command to get an arbitary object from the server
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerGetObject::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Check the object exists
+ if(!rContext.ObjectExists(mObjectID))
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(NoObject));
+ }
+
+ // Open the object
+ std::auto_ptr<IOStream> object(rContext.OpenObject(mObjectID));
+
+ // Stream it to the peer
+ rProtocol.SendStreamAfterCommand(object.release());
+
+ // Tell the caller what the file was
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerGetFile::DoCommand(Protocol &, BackupContext &)
+// Purpose: Command to get an file object from the server -- may have to do a bit of
+// work to get the object.
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerGetFile::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Check the objects exist
+ if(!rContext.ObjectExists(mObjectID)
+ || !rContext.ObjectExists(mInDirectory))
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_DoesNotExist));
+ }
+
+ // Get the directory it's in
+ const BackupStoreDirectory &rdir(rContext.GetDirectory(mInDirectory));
+
+ // Find the object within the directory
+ BackupStoreDirectory::Entry *pfileEntry = rdir.FindEntryByID(mObjectID);
+ if(pfileEntry == 0)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_DoesNotExistInDirectory));
+ }
+
+ // The result
+ std::auto_ptr<IOStream> stream;
+
+ // Does this depend on anything?
+ if(pfileEntry->GetDependsNewer() != 0)
+ {
+ // File exists, but is a patch from a new version. Generate the older version.
+ std::vector<int64_t> patchChain;
+ int64_t id = mObjectID;
+ BackupStoreDirectory::Entry *en = 0;
+ do
+ {
+ patchChain.push_back(id);
+ en = rdir.FindEntryByID(id);
+ if(en == 0)
+ {
+ ::syslog(LOG_ERR, "Object %llx in dir %llx for account %x references object %llx which does not exist in dir",
+ mObjectID, mInDirectory, rContext.GetClientID(), id);
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_PatchConsistencyError));
+ }
+ id = en->GetDependsNewer();
+ } while(en != 0 && id != 0);
+
+ // OK! The last entry in the chain is the full file, the others are patches back from it.
+ // Open the last one, which is the current from file
+ std::auto_ptr<IOStream> from(rContext.OpenObject(patchChain[patchChain.size() - 1]));
+
+ // Then, for each patch in the chain, do a combine
+ for(int p = ((int)patchChain.size()) - 2; p >= 0; --p)
+ {
+ // ID of patch
+ int64_t patchID = patchChain[p];
+
+ // Open it a couple of times
+ std::auto_ptr<IOStream> diff(rContext.OpenObject(patchID));
+ std::auto_ptr<IOStream> diff2(rContext.OpenObject(patchID));
+
+ // Choose a temporary filename for the result of the combination
+ std::string tempFn(RaidFileController::DiscSetPathToFileSystemPath(rContext.GetStoreDiscSet(), rContext.GetStoreRoot() + ".recombinetemp",
+ p + 16 /* rotate which disc it's on */));
+
+ // Open the temporary file
+ std::auto_ptr<IOStream> combined;
+ try
+ {
+ {
+ // Write nastily to allow this to work with gcc 2.x
+ std::auto_ptr<IOStream> t(new FileStream(tempFn.c_str(), O_RDWR | O_CREAT | O_EXCL));
+ combined = t;
+ }
+ // Unlink immediately as it's a temporary file
+ if(::unlink(tempFn.c_str()) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+ }
+ catch(...)
+ {
+ // Make sure it goes
+ ::unlink(tempFn.c_str());
+ throw;
+ }
+
+ // Do the combining
+ BackupStoreFile::CombineFile(*diff, *diff2, *from, *combined);
+
+ // Move to the beginning of the combined file
+ combined->Seek(0, IOStream::SeekType_Absolute);
+
+ // Then shuffle round for the next go
+ from = combined;
+ }
+
+ // Now, from contains a nice file to send to the client. Reorder it
+ {
+ // Write nastily to allow this to work with gcc 2.x
+ std::auto_ptr<IOStream> t(BackupStoreFile::ReorderFileToStreamOrder(from.get(), true /* take ownership */));
+ stream = t;
+ }
+
+ // Release from file to avoid double deletion
+ from.release();
+ }
+ else
+ {
+ // Simple case: file already exists on disc ready to go
+
+ // Open the object
+ std::auto_ptr<IOStream> object(rContext.OpenObject(mObjectID));
+
+ // Verify it
+ if(!BackupStoreFile::VerifyEncodedFileFormat(*object))
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_FileDoesNotVerify));
+ }
+
+ // Reset stream -- seek to beginning
+ object->Seek(0, IOStream::SeekType_Absolute);
+
+ // Reorder the stream/file into stream order
+ {
+ // Write nastily to allow this to work with gcc 2.x
+ std::auto_ptr<IOStream> t(BackupStoreFile::ReorderFileToStreamOrder(object.get(), true /* take ownership */));
+ stream = t;
+ }
+
+ // Object will be deleted when the stream is deleted, so can release the object auto_ptr here to
+ // avoid premature deletiong
+ object.release();
+ }
+
+ // Stream the reordered stream to the peer
+ rProtocol.SendStreamAfterCommand(stream.get());
+
+ // Don't delete the stream here
+ stream.release();
+
+ // Tell the caller what the file was
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerCreateDirectory::DoCommand(Protocol &, BackupContext &)
+// Purpose: Create directory command
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerCreateDirectory::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Get the stream containing the attributes
+ std::auto_ptr<IOStream> attrstream(rProtocol.ReceiveStream());
+ // Collect the attributes -- do this now so no matter what the outcome,
+ // the data has been absorbed.
+ StreamableMemBlock attr;
+ attr.Set(*attrstream, rProtocol.GetTimeout());
+
+ // Check to see if the hard limit has been exceeded
+ if(rContext.HardLimitExceeded())
+ {
+ // Won't allow creation if the limit has been exceeded
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_StorageLimitExceeded));
+ }
+
+ bool alreadyExists = false;
+ int64_t id = rContext.AddDirectory(mContainingDirectoryID, mDirectoryName, attr, mAttributesModTime, alreadyExists);
+
+ if(alreadyExists)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_DirectoryAlreadyExists));
+ }
+
+ // Tell the caller what the file was
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(id));
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerChangeDirAttributes::DoCommand(Protocol &, BackupContext &)
+// Purpose: Change attributes on directory
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerChangeDirAttributes::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Get the stream containing the attributes
+ std::auto_ptr<IOStream> attrstream(rProtocol.ReceiveStream());
+ // Collect the attributes -- do this now so no matter what the outcome,
+ // the data has been absorbed.
+ StreamableMemBlock attr;
+ attr.Set(*attrstream, rProtocol.GetTimeout());
+
+ // Get the context to do it's magic
+ rContext.ChangeDirAttributes(mObjectID, attr, mAttributesModTime);
+
+ // Tell the caller what the file was
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerSetReplacementFileAttributes::DoCommand(Protocol &, BackupContext &)
+// Purpose: Change attributes on directory
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerSetReplacementFileAttributes::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Get the stream containing the attributes
+ std::auto_ptr<IOStream> attrstream(rProtocol.ReceiveStream());
+ // Collect the attributes -- do this now so no matter what the outcome,
+ // the data has been absorbed.
+ StreamableMemBlock attr;
+ attr.Set(*attrstream, rProtocol.GetTimeout());
+
+ // Get the context to do it's magic
+ int64_t objectID = 0;
+ if(!rContext.ChangeFileAttributes(mFilename, mInDirectory, attr, mAttributesHash, objectID))
+ {
+ // Didn't exist
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_DoesNotExist));
+ }
+
+ // Tell the caller what the file was
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(objectID));
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerDeleteFile::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Delete a file
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerDeleteFile::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Context handles this
+ int64_t objectID = 0;
+ rContext.DeleteFile(mFilename, mInDirectory, objectID);
+
+ // return the object ID or zero for not found
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(objectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerDeleteDirectory::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Delete a directory
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerDeleteDirectory::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Check it's not asking for the root directory to be deleted
+ if(mObjectID == BACKUPSTORE_ROOT_DIRECTORY_ID)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_CannotDeleteRoot));
+ }
+
+ // Context handles this
+ rContext.DeleteDirectory(mObjectID);
+
+ // return the object ID
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerUndeleteDirectory::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Undelete a directory
+// Created: 23/11/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerUndeleteDirectory::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Check it's not asking for the root directory to be deleted
+ if(mObjectID == BACKUPSTORE_ROOT_DIRECTORY_ID)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_CannotDeleteRoot));
+ }
+
+ // Context handles this
+ rContext.DeleteDirectory(mObjectID, true /* undelete */);
+
+ // return the object ID
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerSetClientStoreMarker::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Command to set the client's store marker
+// Created: 2003/10/29
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerSetClientStoreMarker::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Set the marker
+ rContext.SetClientStoreMarker(mClientStoreMarker);
+
+ // return store marker set
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mClientStoreMarker));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerMoveObject::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Command to move an object from one directory to another
+// Created: 2003/11/12
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerMoveObject::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+ CHECK_WRITEABLE_SESSION
+
+ // Let context do this, but modify error reporting on exceptions...
+ try
+ {
+ rContext.MoveObject(mObjectID, mMoveFromDirectory, mMoveToDirectory,
+ mNewFilename, (mFlags & Flags_MoveAllWithSameName) == Flags_MoveAllWithSameName,
+ (mFlags & Flags_AllowMoveOverDeletedObject) == Flags_AllowMoveOverDeletedObject);
+ }
+ catch(BackupStoreException &e)
+ {
+ if(e.GetSubType() == BackupStoreException::CouldNotFindEntryInDirectory)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_DoesNotExist));
+ }
+ else if(e.GetSubType() == BackupStoreException::NameAlreadyExistsInDirectory)
+ {
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerError(
+ BackupProtocolServerError::ErrorType, BackupProtocolServerError::Err_TargetNameExists));
+ }
+ else
+ {
+ throw;
+ }
+ }
+
+ // Return the object ID
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupProtocolServerGetObjectName::DoCommand(BackupProtocolServer &, BackupContext &)
//		Purpose: Command to find the name of an object -- walks from the
//				 containing directory up to the root, collecting one
//				 filename element per level into a stream sent after
//				 the reply.
//		Created: 12/11/03
//
// --------------------------------------------------------------------------
std::auto_ptr<ProtocolObject> BackupProtocolServerGetObjectName::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
{
	CHECK_PHASE(Phase_Commands)

	// Stream which accumulates the name elements, leaf-most first
	std::auto_ptr<CollectInBufferStream> stream(new CollectInBufferStream);

	// Object and directory IDs -- updated as the loop walks up the tree
	int64_t objectID = mObjectID;
	int64_t dirID = mContainingDirectoryID;

	// Data to return in the reply. Flags and modification times are
	// captured from the first (leaf) entry only -- see the guards below.
	int32_t numNameElements = 0;
	int16_t objectFlags = 0;
	int64_t modTime = 0;
	uint64_t attrModHash = 0;
	bool haveModTimes = false;

	do
	{
		// Check the directory really exists
		if(!rContext.ObjectExists(dirID, BackupContext::ObjectExists_Directory))
		{
			return std::auto_ptr<ProtocolObject>(new BackupProtocolServerObjectName(BackupProtocolServerObjectName::NumNameElements_ObjectDoesntExist, 0, 0, 0));
		}

		// Load up the directory
		const BackupStoreDirectory &rdir(rContext.GetDirectory(dirID));

		// Find the element in this directory and store its name.
		// (ObjectID_DirectoryOnly means the caller only asked for the
		// directory's own path, so there's no leaf entry to look up.)
		if(objectID != ObjectID_DirectoryOnly)
		{
			const BackupStoreDirectory::Entry *en = rdir.FindEntryByID(objectID);

			// If this can't be found, then there is a problem... tell the caller it can't be found
			if(en == 0)
			{
				// Abort!
				return std::auto_ptr<ProtocolObject>(new BackupProtocolServerObjectName(BackupProtocolServerObjectName::NumNameElements_ObjectDoesntExist, 0, 0, 0));
			}

			// Store flags? (only from the first entry examined, i.e. the target object)
			if(objectFlags == 0)
			{
				objectFlags = en->GetFlags();
			}

			// Store modification times? (likewise, first entry only)
			if(!haveModTimes)
			{
				modTime = en->GetModificationTime();
				attrModHash = en->GetAttributesHash();
				haveModTimes = true;
			}

			// Store the name in the stream
			en->GetName().WriteToStream(*stream);

			// Count of name elements
			++numNameElements;
		}

		// Move up one level: the directory becomes the object to find
		// in its own container next time round
		objectID = dirID;
		dirID = rdir.GetContainerID();

	} while(objectID != 0 && objectID != BACKUPSTORE_ROOT_DIRECTORY_ID);

	// Stream to send?
	if(numNameElements > 0)
	{
		// Get the stream ready to go
		stream->SetForReading();
		// Tell the protocol to send the stream (it takes ownership)
		rProtocol.SendStreamAfterCommand(stream.release());
	}

	// Make reply
	return std::auto_ptr<ProtocolObject>(new BackupProtocolServerObjectName(numNameElements, modTime, attrModHash, objectFlags));
}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerGetBlockIndexByID::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Get the block index from a file, by ID
+// Created: 19/1/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerGetBlockIndexByID::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Open the file
+ std::auto_ptr<IOStream> stream(rContext.OpenObject(mObjectID));
+
+ // Move the file pointer to the block index
+ BackupStoreFile::MoveStreamPositionToBlockIndex(*stream);
+
+ // Return the stream to the client
+ rProtocol.SendStreamAfterCommand(stream.release());
+
+ // Return the object ID
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(mObjectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerGetBlockIndexByName::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Get the block index from a file, by name within a directory
+// Created: 19/1/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerGetBlockIndexByName::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Get the directory
+ const BackupStoreDirectory &dir(rContext.GetDirectory(mInDirectory));
+
+ // Find the latest object ID within it which has the same name
+ int64_t objectID = 0;
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
+ {
+ if(en->GetName() == mFilename)
+ {
+ // Store the ID, if it's a newer ID than the last one
+ if(en->GetObjectID() > objectID)
+ {
+ objectID = en->GetObjectID();
+ }
+ }
+ }
+
+ // Found anything?
+ if(objectID == 0)
+ {
+ // No... return a zero object ID
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(0));
+ }
+
+ // Open the file
+ std::auto_ptr<IOStream> stream(rContext.OpenObject(objectID));
+
+ // Move the file pointer to the block index
+ BackupStoreFile::MoveStreamPositionToBlockIndex(*stream);
+
+ // Return the stream to the client
+ rProtocol.SendStreamAfterCommand(stream.release());
+
+ // Return the object ID
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerSuccess(objectID));
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupProtocolServerGetAccountUsage::DoCommand(BackupProtocolServer &, BackupContext &)
+// Purpose: Return the amount of disc space used
+// Created: 19/4/04
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<ProtocolObject> BackupProtocolServerGetAccountUsage::DoCommand(BackupProtocolServer &rProtocol, BackupContext &rContext)
+{
+ CHECK_PHASE(Phase_Commands)
+
+ // Get store info from context
+ const BackupStoreInfo &rinfo(rContext.GetBackupStoreInfo());
+
+ // Find block size
+ RaidFileController &rcontroller(RaidFileController::GetController());
+ RaidFileDiscSet &rdiscSet(rcontroller.GetDiscSet(rinfo.GetDiscSetNumber()));
+
+ // Return info
+ return std::auto_ptr<ProtocolObject>(new BackupProtocolServerAccountUsage(
+ rinfo.GetBlocksUsed(),
+ rinfo.GetBlocksInOldFiles(),
+ rinfo.GetBlocksInDeletedFiles(),
+ rinfo.GetBlocksInDirectories(),
+ rinfo.GetBlocksSoftLimit(),
+ rinfo.GetBlocksHardLimit(),
+ rdiscSet.GetBlockSize()
+ ));
+}
+
diff --git a/bin/bbstored/BackupConstants.h b/bin/bbstored/BackupConstants.h
new file mode 100755
index 00000000..515b3bcd
--- /dev/null
+++ b/bin/bbstored/BackupConstants.h
@@ -0,0 +1,23 @@
// --------------------------------------------------------------------------
//
// File
//		Name:    BackupConstants.h
//		Purpose: Constants for the backup server and client
//		Created: 2003/08/20
//
// --------------------------------------------------------------------------

#ifndef BACKUPCONSTANTS__H
#define BACKUPCONSTANTS__H

// Default location of the accounts database used by the store daemon
#define BACKUP_STORE_DEFAULT_ACCOUNT_DATABASE_FILE "/etc/box/backupstoreaccounts"

// 15 minutes to timeout (milliseconds)
#define BACKUP_STORE_TIMEOUT (15*60*1000)

// Should the store daemon convert files to Raid immediately?
#define BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY true

#endif // BACKUPCONSTANTS__H
+
+
diff --git a/bin/bbstored/BackupContext.cpp b/bin/bbstored/BackupContext.cpp
new file mode 100755
index 00000000..c796c13a
--- /dev/null
+++ b/bin/bbstored/BackupContext.cpp
@@ -0,0 +1,1650 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupContext.cpp
+// Purpose: Context for backup store server
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <stdio.h>
+
+#include "BackupContext.h"
+#include "RaidFileWrite.h"
+#include "RaidFileRead.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreException.h"
+#include "BackupStoreInfo.h"
+#include "BackupConstants.h"
+#include "BackupStoreFile.h"
+#include "BackupStoreObjectMagic.h"
+#include "StoreStructure.h"
+#include "BackupStoreDaemon.h"
+#include "RaidFileController.h"
+#include "FileStream.h"
+
+#include "MemLeakFindOn.h"
+
+
+// Maximum number of directories to keep in the cache
+// When the cache is bigger than this, everything gets
+// deleted.
+#ifdef NDEBUG
+ #define MAX_CACHE_SIZE 32
+#else
+ #define MAX_CACHE_SIZE 2
+#endif
+
+// Allow the housekeeping process 4 seconds to release an account
+#define MAX_WAIT_FOR_HOUSEKEEPING_TO_RELEASE_ACCOUNT 4
+
+// Maximum amount of store info updates before it's actually saved to disc.
+#define STORE_INFO_SAVE_DELAY 96
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupContext::BackupContext()
//		Purpose: Constructor
//		Created: 2003/08/20
//
// --------------------------------------------------------------------------
BackupContext::BackupContext(int32_t ClientID, BackupStoreDaemon &rDaemon)
	: mClientID(ClientID),
	mrDaemon(rDaemon),
	mProtocolPhase(Phase_START),	// protocol phase advances as the session progresses
	mClientHasAccount(false),
	mStoreDiscSet(-1),	// not valid until the account's store is set
	mReadOnly(true),	// read only until AttemptToGetWriteLock() succeeds
	mSaveStoreInfoDelay(STORE_INFO_SAVE_DELAY)	// countdown for delayed store info saves
{
}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::~BackupContext()
+// Purpose: Destructor
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+BackupContext::~BackupContext()
+{
+ // Delete the objects in the cache
+ for(std::map<int64_t, BackupStoreDirectory*>::iterator i(mDirectoryCache.begin()); i != mDirectoryCache.end(); ++i)
+ {
+ delete (i->second);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::CleanUp()
+// Purpose: Clean up after a connection
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+void BackupContext::CleanUp()
+{
+ // Make sure the store info is saved, if it has been loaded, isn't read only and has been modified
+ if(mpStoreInfo.get() && !(mpStoreInfo->IsReadOnly()) && mpStoreInfo->IsModified())
+ {
+ mpStoreInfo->Save();
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::ReceivedFinishCommand()
+// Purpose: Called when the finish command is received by the protocol
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+void BackupContext::ReceivedFinishCommand()
+{
+ if(!mReadOnly && mpStoreInfo.get())
+ {
+ // Save the store info, not delayed
+ SaveStoreInfo(false);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::AttemptToGetWriteLock()
+// Purpose: Attempt to get a write lock for the store, and if so, unset the read only flags
+// Created: 2003/09/02
+//
+// --------------------------------------------------------------------------
+bool BackupContext::AttemptToGetWriteLock()
+{
+ // Make the filename of the write lock file
+ std::string writeLockFile;
+ StoreStructure::MakeWriteLockFilename(mStoreRoot, mStoreDiscSet, writeLockFile);
+
+ // Request the lock
+ bool gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
+
+ if(!gotLock)
+ {
+ // The housekeeping process might have the thing open -- ask it to stop
+ char msg[256];
+ int msgLen = sprintf(msg, "r%x\n", mClientID);
+ // Send message
+ mrDaemon.SendMessageToHousekeepingProcess(msg, msgLen);
+
+ // Then try again a few times
+ int tries = MAX_WAIT_FOR_HOUSEKEEPING_TO_RELEASE_ACCOUNT;
+ do
+ {
+ ::sleep(1 /* second */);
+ --tries;
+ gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
+
+ } while(!gotLock && tries > 0);
+ }
+
+ if(gotLock)
+ {
+ // Got the lock, mark as not read only
+ mReadOnly = false;
+ }
+
+ return gotLock;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::LoadStoreInfo()
+// Purpose: Load the store info from disc
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+void BackupContext::LoadStoreInfo()
+{
+ if(mpStoreInfo.get() != 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoAlreadyLoaded)
+ }
+
+ // Load it up!
+ std::auto_ptr<BackupStoreInfo> i(BackupStoreInfo::Load(mClientID, mStoreRoot, mStoreDiscSet, mReadOnly));
+
+ // Check it
+ if(i->GetAccountID() != mClientID)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoForWrongAccount)
+ }
+
+ // Keep the pointer to it
+ mpStoreInfo = i;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::SaveStoreInfo(bool)
+// Purpose: Potentially delayed saving of the store info
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+void BackupContext::SaveStoreInfo(bool AllowDelay)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Can delay saving it a little while?
+ if(AllowDelay)
+ {
+ --mSaveStoreInfoDelay;
+ if(mSaveStoreInfoDelay > 0)
+ {
+ return;
+ }
+ }
+
+ // Want to save now
+ mpStoreInfo->Save();
+
+ // Set count for next delay
+ mSaveStoreInfoDelay = STORE_INFO_SAVE_DELAY;
+}
+
+
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupContext::MakeObjectFilename(int64_t, std::string &, bool)
//		Purpose: Create the filename of an object in the store, optionally creating the
//				 containing directory if it doesn't already exist.
//		Created: 2003/09/02
//
// --------------------------------------------------------------------------
void BackupContext::MakeObjectFilename(int64_t ObjectID, std::string &rOutput, bool EnsureDirectoryExists)
{
	// Delegate to the shared utility function, so the store directory
	// layout is defined in exactly one place.
	StoreStructure::MakeObjectFilename(ObjectID, mStoreRoot, mStoreDiscSet, rOutput, EnsureDirectoryExists);
}
+
+
// --------------------------------------------------------------------------
//
// Function
//		Name:    BackupContext::GetDirectoryInternal(int64_t)
//		Purpose: Return a reference to a directory. Valid only until the
//				 next time a function which affects directories is called.
//				 Mainly this function, and creation of files.
//				 Private version of this, which returns non-const directories.
//		Created: 2003/09/02
//
// --------------------------------------------------------------------------
BackupStoreDirectory &BackupContext::GetDirectoryInternal(int64_t ObjectID)
{
	// Get the filename
	std::string filename;
	MakeObjectFilename(ObjectID, filename);

	// Already in cache?
	std::map<int64_t, BackupStoreDirectory*>::iterator item(mDirectoryCache.find(ObjectID));
	if(item != mDirectoryCache.end())
	{
		// Check the revision ID of the file on disc -- if it differs from
		// the cached copy's (e.g. housekeeping rewrote it), it needs refreshing
		int64_t revID = 0;
		if(!RaidFileRead::FileExists(mStoreDiscSet, filename, &revID))
		{
			THROW_EXCEPTION(BackupStoreException, DirectoryHasBeenDeleted)
		}

		if(revID == item->second->GetRevisionID())
		{
			// Up to date... return the cached object
			return *(item->second)
;		}

		// Stale: delete this cached object, then fall through to reload it
		delete item->second;
		mDirectoryCache.erase(item);
	}

	// Need to load it up

	// First check to see if the cache is too big
	if(mDirectoryCache.size() > MAX_CACHE_SIZE)
	{
		// Very simple eviction policy. Just delete everything!
		for(std::map<int64_t, BackupStoreDirectory*>::iterator i(mDirectoryCache.begin()); i != mDirectoryCache.end(); ++i)
		{
			delete (i->second);
		}
		mDirectoryCache.clear();
	}

	// Get a RaidFileRead to read it
	int64_t revID = 0;
	std::auto_ptr<RaidFileRead> objectFile(RaidFileRead::Open(mStoreDiscSet, filename, &revID));
	ASSERT(revID != 0);

	// New directory object
	std::auto_ptr<BackupStoreDirectory> dir(new BackupStoreDirectory);

	// Read it from the stream, then set its revision ID so the staleness
	// check above works on later lookups
	dir->ReadFromStream(*objectFile, IOStream::TimeOutInfinite);
	dir->SetRevisionID(revID);

	// Make sure the size of the directory is available for writing the dir back
	int64_t dirSize = objectFile->GetDiscUsageInBlocks();
	ASSERT(dirSize > 0);
	dir->SetUserInfo1_SizeInBlocks(dirSize);

	// Store in cache. Ownership is taken out of the auto_ptr first, so a
	// throwing map insert can be cleaned up explicitly.
	BackupStoreDirectory *pdir = dir.release();
	try
	{
		mDirectoryCache[ObjectID] = pdir;
	}
	catch(...)
	{
		delete pdir;
		throw;
	}

	// Return it
	return *pdir;
}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::AllocateObjectID()
+// Purpose: Allocate a new object ID, tolerant of failures to save store info
+// Created: 16/12/03
+//
+// --------------------------------------------------------------------------
+int64_t BackupContext::AllocateObjectID()
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ // Given that the store info may not be saved for STORE_INFO_SAVE_DELAY
+ // times after it has been updated, this is a reasonable number of times
+ // to try for finding an unused ID.
+ // (Sizes used in the store info are fixed by the housekeeping process)
+ int retryLimit = (STORE_INFO_SAVE_DELAY * 2);
+
+ while(retryLimit > 0)
+ {
+ // Attempt to allocate an ID from the store
+ int64_t id = mpStoreInfo->AllocateObjectID();
+
+ // Generate filename
+ std::string filename;
+ MakeObjectFilename(id, filename);
+ // Check it doesn't exist
+ if(!RaidFileRead::FileExists(mStoreDiscSet, filename))
+ {
+ // Success!
+ return id;
+ }
+
+ // Decrement retry count, and try again
+ --retryLimit;
+
+ // Mark that the store info should be saved as soon as possible
+ mSaveStoreInfoDelay = 0;
+
+ TRACE1("When allocating object ID, found that %lld is already in use\n", id);
+ }
+
+ THROW_EXCEPTION(BackupStoreException, CouldNotFindUnusedIDDuringAllocation)
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::AddFile(IOStream &, int64_t, int64_t, int64_t, const BackupStoreFilename &, bool)
+// Purpose: Add a file to the store, from a given stream, into a specified directory.
+// Returns object ID of the new file.
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+int64_t BackupContext::AddFile(IOStream &rFile, int64_t InDirectory, int64_t ModificationTime,
+ int64_t AttributesHash, int64_t DiffFromFileID, const BackupStoreFilename &rFilename,
+ bool MarkFileWithSameNameAsOldVersions)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // This is going to be a bit complex to make sure it copes OK
+ // with things going wrong.
+ // The only thing which isn't safe is incrementing the object ID
+ // and keeping the blocks used entirely accurate -- but these
+ // aren't big problems if they go horribly wrong. The sizes will
+ // be corrected the next time the account has a housekeeping run,
+ // and the object ID allocation code is tolerant of missed IDs.
+ // (the info is written lazily, so these are necessary)
+
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Allocate the next ID
+ int64_t id = AllocateObjectID();
+
+ // Stream the file to disc
+ std::string fn;
+ MakeObjectFilename(id, fn, true /* make sure the directory it's in exists */);
+ int64_t blocksUsed = 0;
+ RaidFileWrite *ppreviousVerStoreFile = 0;
+ bool reversedDiffIsCompletelyDifferent = false;
+ int64_t oldVersionNewBlocksUsed = 0;
+ try
+ {
+ RaidFileWrite storeFile(mStoreDiscSet, fn);
+ storeFile.Open(false /* no overwriting */);
+ int64_t spaceAdjustFromDiff = 0; // size adjustment from use of patch in old file
+
+ // Diff or full file?
+ if(DiffFromFileID == 0)
+ {
+ // A full file, just store to disc
+ if(!rFile.CopyStreamTo(storeFile, BACKUP_STORE_TIMEOUT))
+ {
+ THROW_EXCEPTION(BackupStoreException, ReadFileFromStreamTimedOut)
+ }
+ }
+ else
+ {
+ // Check that the diffed from ID actually exists in the directory
+ if(dir.FindEntryByID(DiffFromFileID) == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, DiffFromIDNotFoundInDirectory)
+ }
+
+ // Diff file, needs to be recreated.
+ // Choose a temporary filename.
+ std::string tempFn(RaidFileController::DiscSetPathToFileSystemPath(mStoreDiscSet, fn + ".difftemp",
+ 1 /* NOT the same disc as the write file, to avoid using lots of space on the same disc unnecessarily */));
+
+ try
+ {
+ // Open it twice
+ FileStream diff(tempFn.c_str(), O_RDWR | O_CREAT | O_EXCL);
+ FileStream diff2(tempFn.c_str(), O_RDONLY);
+ // Unlink it immediately, so it definately goes away
+ if(::unlink(tempFn.c_str()) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError);
+ }
+
+ // Stream the incoming diff to this temporary file
+ if(!rFile.CopyStreamTo(diff, BACKUP_STORE_TIMEOUT))
+ {
+ THROW_EXCEPTION(BackupStoreException, ReadFileFromStreamTimedOut)
+ }
+
+ // Verify the diff
+ diff.Seek(0, IOStream::SeekType_Absolute);
+ if(!BackupStoreFile::VerifyEncodedFileFormat(diff))
+ {
+ THROW_EXCEPTION(BackupStoreException, AddedFileDoesNotVerify)
+ }
+
+ // Seek to beginning of diff file
+ diff.Seek(0, IOStream::SeekType_Absolute);
+
+ // Filename of the old version
+ std::string oldVersionFilename;
+ MakeObjectFilename(DiffFromFileID, oldVersionFilename, false /* no need to make sure the directory it's in exists */);
+
+ // Reassemble that diff -- open previous file, and combine the patch and file
+ std::auto_ptr<RaidFileRead> from(RaidFileRead::Open(mStoreDiscSet, oldVersionFilename));
+ BackupStoreFile::CombineFile(diff, diff2, *from, storeFile);
+
+ // Then... reverse the patch back (open the from file again, and create a write file to overwrite it)
+ std::auto_ptr<RaidFileRead> from2(RaidFileRead::Open(mStoreDiscSet, oldVersionFilename));
+ ppreviousVerStoreFile = new RaidFileWrite(mStoreDiscSet, oldVersionFilename);
+ ppreviousVerStoreFile->Open(true /* allow overwriting */);
+ from->Seek(0, IOStream::SeekType_Absolute);
+ diff.Seek(0, IOStream::SeekType_Absolute);
+ BackupStoreFile::ReverseDiffFile(diff, *from, *from2, *ppreviousVerStoreFile,
+ DiffFromFileID, &reversedDiffIsCompletelyDifferent);
+
+ // Store disc space used
+ oldVersionNewBlocksUsed = ppreviousVerStoreFile->GetDiscUsageInBlocks();
+
+ // And make a space adjustment for the size calculation
+ spaceAdjustFromDiff = from->GetDiscUsageInBlocks() - oldVersionNewBlocksUsed;
+
+ // Everything cleans up here...
+ }
+ catch(...)
+ {
+ // Be very paranoid about deleting this temp file -- we could only leave a zero byte file anyway
+ ::unlink(tempFn.c_str());
+ throw;
+ }
+ }
+
+ // Get the blocks used
+ blocksUsed = storeFile.GetDiscUsageInBlocks();
+
+ // Exceeds the hard limit?
+ if((mpStoreInfo->GetBlocksUsed() + blocksUsed - spaceAdjustFromDiff) > mpStoreInfo->GetBlocksHardLimit())
+ {
+ THROW_EXCEPTION(BackupStoreException, AddedFileExceedsStorageLimit)
+ // The store file will be deleted automatically by the RaidFile object
+ }
+
+ // Commit the file
+ storeFile.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+ }
+ catch(...)
+ {
+ // Delete any previous version store file
+ if(ppreviousVerStoreFile != 0)
+ {
+ delete ppreviousVerStoreFile;
+ ppreviousVerStoreFile = 0;
+ }
+
+ throw;
+ }
+
+ // Verify the file -- only necessary for non-diffed versions
+ // NOTE: No need to catch exceptions and delete ppreviousVerStoreFile, because
+ // in the non-diffed code path it's never allocated.
+ if(DiffFromFileID == 0)
+ {
+ std::auto_ptr<RaidFileRead> checkFile(RaidFileRead::Open(mStoreDiscSet, fn));
+ if(!BackupStoreFile::VerifyEncodedFileFormat(*checkFile))
+ {
+ // Error! Delete the file
+ RaidFileWrite del(mStoreDiscSet, fn);
+ del.Delete();
+
+ // Exception
+ THROW_EXCEPTION(BackupStoreException, AddedFileDoesNotVerify)
+ }
+ }
+
+ // Modify the directory -- first make all files with the same name
+ // marked as an old version
+ int64_t blocksInOldFiles = 0;
+ try
+ {
+ if(MarkFileWithSameNameAsOldVersions)
+ {
+ BackupStoreDirectory::Iterator i(dir);
+
+ BackupStoreDirectory::Entry *e = 0;
+ while((e = i.Next()) != 0)
+ {
+ // First, check it's not an old version (cheaper comparison)
+ if((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0)
+ {
+ // Compare name
+ if(e->GetName() == rFilename)
+ {
+ // Check that it's definately not an old version
+ ASSERT((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0);
+ // Set old version flag
+ e->AddFlags(BackupStoreDirectory::Entry::Flags_OldVersion);
+ // Can safely do this, because we know we won't be here if it's already
+ // an old version
+ blocksInOldFiles += e->GetSizeInBlocks();
+ }
+ }
+ }
+ }
+
+ // Then the new entry
+ BackupStoreDirectory::Entry *pnewEntry = dir.AddEntry(rFilename,
+ ModificationTime, id, blocksUsed, BackupStoreDirectory::Entry::Flags_File, AttributesHash);
+
+ // Adjust for the patch back stuff?
+ if(DiffFromFileID != 0)
+ {
+ // Get old version entry
+ BackupStoreDirectory::Entry *poldEntry = dir.FindEntryByID(DiffFromFileID);
+ ASSERT(poldEntry != 0);
+
+ // Adjust dependency info of file?
+ if(!reversedDiffIsCompletelyDifferent)
+ {
+ poldEntry->SetDependsNewer(id);
+ pnewEntry->SetDependsOlder(DiffFromFileID);
+ }
+
+ // Adjust size of old entry
+ int64_t oldSize = poldEntry->GetSizeInBlocks();
+ poldEntry->SetSizeInBlocks(oldVersionNewBlocksUsed);
+
+ // And adjust blocks used count, for later adjustment
+ blocksUsed += (oldVersionNewBlocksUsed - oldSize);
+ blocksInOldFiles += (oldVersionNewBlocksUsed - oldSize);
+ }
+
+ // Write the directory back to disc
+ SaveDirectory(dir, InDirectory);
+
+ // Commit the old version's new patched version, now that the directory safely reflects
+ // the state of the files on disc.
+ if(ppreviousVerStoreFile != 0)
+ {
+ ppreviousVerStoreFile->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+ delete ppreviousVerStoreFile;
+ ppreviousVerStoreFile = 0;
+ }
+ }
+ catch(...)
+ {
+ // Back out on adding that file
+ RaidFileWrite del(mStoreDiscSet, fn);
+ del.Delete();
+
+ // Remove this entry from the cache
+ RemoveDirectoryFromCache(InDirectory);
+
+ // Delete any previous version store file
+ if(ppreviousVerStoreFile != 0)
+ {
+ delete ppreviousVerStoreFile;
+ ppreviousVerStoreFile = 0;
+ }
+
+ // Don't worry about the incremented number in the store info
+ throw;
+ }
+
+ // Check logic
+ ASSERT(ppreviousVerStoreFile == 0);
+
+ // Modify the store info
+ mpStoreInfo->ChangeBlocksUsed(blocksUsed);
+ mpStoreInfo->ChangeBlocksInOldFiles(blocksInOldFiles);
+
+ // Save the store info -- can cope if this exceptions because infomation
+ // will be rebuilt by housekeeping, and ID allocation can recover.
+ SaveStoreInfo();
+
+ // Return the ID to the caller
+ return id;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::DeleteFile(const BackupStoreFilename &, int64_t, int64_t &)
+// Purpose: Deletes a file, returning true if the file existed. Object ID returned too, set to zero if not found.
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+bool BackupContext::DeleteFile(const BackupStoreFilename &rFilename, int64_t InDirectory, int64_t &rObjectIDOut)
+{
+ // Essential checks!
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Find the directory the file is in (will exception if it fails)
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Setup flags
+ bool fileExisted = false;
+ bool madeChanges = false;
+ rObjectIDOut = 0; // not found
+
+ // Count of deleted blocks
+ int64_t blocksDel = 0;
+
+ try
+ {
+ // Iterate through directory, only looking at files which haven't been deleted
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *e = 0;
+ while((e = i.Next(BackupStoreDirectory::Entry::Flags_File,
+ BackupStoreDirectory::Entry::Flags_Deleted)) != 0)
+ {
+ // Compare name
+ if(e->GetName() == rFilename)
+ {
+ // Check that it's definately not already deleted
+ ASSERT((e->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) == 0);
+ // Set deleted flag
+ e->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ // Mark as made a change
+ madeChanges = true;
+ // Can safely do this, because we know we won't be here if it's already
+ // an old version
+ blocksDel += e->GetSizeInBlocks();
+ // Is this the last version?
+ if((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0)
+ {
+ // Yes. It's been found.
+ rObjectIDOut = e->GetObjectID();
+ fileExisted = true;
+ }
+ }
+ }
+
+ // Save changes?
+ if(madeChanges)
+ {
+ // Save the directory back
+ SaveDirectory(dir, InDirectory);
+
+ // Modify the store info, and write
+ mpStoreInfo->ChangeBlocksInDeletedFiles(blocksDel);
+
+ // Maybe postponed save of store info
+ SaveStoreInfo();
+ }
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(InDirectory);
+ throw;
+ }
+
+
+ return fileExisted;
+}
+
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::RemoveDirectoryFromCache(int64_t)
+// Purpose: Remove directory from cache
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+void BackupContext::RemoveDirectoryFromCache(int64_t ObjectID)
+{
+ std::map<int64_t, BackupStoreDirectory*>::iterator item(mDirectoryCache.find(ObjectID));
+ if(item != mDirectoryCache.end())
+ {
+ // Delete this cached object
+ delete item->second;
+ // Erase the entry form the map
+ mDirectoryCache.erase(item);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::SaveDirectory(BackupStoreDirectory &, int64_t)
+// Purpose: Save directory back to disc, update time in cache
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+void BackupContext::SaveDirectory(BackupStoreDirectory &rDir, int64_t ObjectID)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(rDir.GetObjectID() != ObjectID)
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+
+ try
+ {
+ // Write to disc, adjust size in store info
+ std::string dirfn;
+ MakeObjectFilename(ObjectID, dirfn);
+ {
+ RaidFileWrite writeDir(mStoreDiscSet, dirfn);
+ writeDir.Open(true /* allow overwriting */);
+ rDir.WriteToStream(writeDir);
+
+ // get the disc usage (must do this before commiting it)
+ int64_t dirSize = writeDir.GetDiscUsageInBlocks();
+
+ // Commit directory
+ writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+ // Make sure the size of the directory is available for writing the dir back
+ ASSERT(dirSize > 0);
+ int64_t sizeAdjustment = dirSize - rDir.GetUserInfo1_SizeInBlocks();
+ mpStoreInfo->ChangeBlocksUsed(sizeAdjustment);
+ mpStoreInfo->ChangeBlocksInDirectories(sizeAdjustment);
+ // Update size stored in directory
+ rDir.SetUserInfo1_SizeInBlocks(dirSize);
+ }
+ // Refresh revision ID in cache
+ {
+ int64_t revid = 0;
+ if(!RaidFileRead::FileExists(mStoreDiscSet, dirfn, &revid))
+ {
+ THROW_EXCEPTION(BackupStoreException, Internal)
+ }
+ rDir.SetRevisionID(revid);
+ }
+ }
+ catch(...)
+ {
+ // Remove it from the cache if anything went wrong
+ RemoveDirectoryFromCache(ObjectID);
+ throw;
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::AddDirectory(int64_t, const BackupStoreFilename &, bool &)
+// Purpose: Creates a directory (or just returns the ID of an existing one). rAlreadyExists set appropraitely.
+// Created: 2003/09/04
+//
+// --------------------------------------------------------------------------
+int64_t BackupContext::AddDirectory(int64_t InDirectory, const BackupStoreFilename &rFilename, const StreamableMemBlock &Attributes, int64_t AttributesModTime, bool &rAlreadyExists)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Flags as not already existing
+ rAlreadyExists = false;
+
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Scan the directory for the name (only looking for directories which already exist)
+ {
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING,
+ BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)) != 0) // Ignore deleted and old directories
+ {
+ if(en->GetName() == rFilename)
+ {
+ // Already exists
+ rAlreadyExists = true;
+ return en->GetObjectID();
+ }
+ }
+ }
+
+ // Allocate the next ID
+ int64_t id = AllocateObjectID();
+
+ // Create a blank directory with the given attributes on disc
+ std::string fn;
+ MakeObjectFilename(id, fn, true /* make sure the directory it's in exists */);
+ {
+ BackupStoreDirectory emptyDir(id, InDirectory);
+ // add the atttribues
+ emptyDir.SetAttributes(Attributes, AttributesModTime);
+
+ // Write...
+ RaidFileWrite dirFile(mStoreDiscSet, fn);
+ dirFile.Open(false /* no overwriting */);
+ emptyDir.WriteToStream(dirFile);
+ // Get disc usage, before it's commited
+ int64_t dirSize = dirFile.GetDiscUsageInBlocks();
+ // Commit the file
+ dirFile.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+ // Make sure the size of the directory is added to the usage counts in the info
+ ASSERT(dirSize > 0);
+ mpStoreInfo->ChangeBlocksUsed(dirSize);
+ mpStoreInfo->ChangeBlocksInDirectories(dirSize);
+ // Not added to cache, so don't set the size in the directory
+ }
+
+ // Then add it into the directory
+ try
+ {
+ dir.AddEntry(rFilename, 0 /* modification time */, id, 0 /* blocks used */, BackupStoreDirectory::Entry::Flags_Dir, 0 /* attributes mod time */);
+ SaveDirectory(dir, InDirectory);
+ }
+ catch(...)
+ {
+ // Back out on adding that directory
+ RaidFileWrite del(mStoreDiscSet, fn);
+ del.Delete();
+
+ // Remove this entry from the cache
+ RemoveDirectoryFromCache(InDirectory);
+
+ // Don't worry about the incremented number in the store info
+ throw;
+ }
+
+ // Save the store info (may be postponed)
+ SaveStoreInfo();
+
+ // tell caller what the ID was
+ return id;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::DeleteFile(const BackupStoreFilename &, int64_t, int64_t &, bool)
+// Purpose: Recusively deletes a directory (or undeletes if Undelete = true)
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+void BackupContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
+{
+ // Essential checks!
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Containing directory
+ int64_t InDirectory = 0;
+
+ // Count of blocks deleted
+ int64_t blocksDeleted = 0;
+
+ try
+ {
+ // Get the directory that's to be deleted
+ {
+ // In block, because dir may not be valid after the delete directory call
+ BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
+
+ // Store the directory it's in for later
+ InDirectory = dir.GetContainerID();
+
+ // Depth first delete of contents
+ DeleteDirectoryRecurse(ObjectID, blocksDeleted, Undelete);
+ }
+
+ // Remove the entry from the directory it's in
+ ASSERT(InDirectory != 0);
+ BackupStoreDirectory &parentDir(GetDirectoryInternal(InDirectory));
+
+ BackupStoreDirectory::Iterator i(parentDir);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
+ Undelete?(0):(BackupStoreDirectory::Entry::Flags_Deleted))) != 0) // Ignore deleted directories (or not deleted if Undelete)
+ {
+ if(en->GetObjectID() == ObjectID)
+ {
+ // This is the one to delete
+ if(Undelete)
+ {
+ en->RemoveFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+ else
+ {
+ en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+
+ // Save it
+ SaveDirectory(parentDir, InDirectory);
+
+ // Done
+ break;
+ }
+ }
+
+ // Update blocks deleted count
+ mpStoreInfo->ChangeBlocksInDeletedFiles(Undelete?(0 - blocksDeleted):(blocksDeleted));
+
+ // Save store info, may be postponed
+ SaveStoreInfo();
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(InDirectory);
+ throw;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::DeleteDirectoryRecurse(BackupStoreDirectory &, int64_t)
+// Purpose: Private. Deletes a directory depth-first recusively.
+// Created: 2003/10/21
+//
+// --------------------------------------------------------------------------
+void BackupContext::DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBlocksDeletedOut, bool Undelete)
+{
+ try
+ {
+ // Does things carefully to avoid using a directory in the cache after recursive call
+ // because it may have been deleted.
+
+ // Do sub directories
+ {
+ // Get the directory...
+ BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
+
+ // Then scan it for directories
+ std::vector<int64_t> subDirs;
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+ if(Undelete)
+ {
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir | BackupStoreDirectory::Entry::Flags_Deleted, // deleted dirs
+ BackupStoreDirectory::Entry::Flags_EXCLUDE_NOTHING)) != 0)
+ {
+ // Store the directory ID.
+ subDirs.push_back(en->GetObjectID());
+ }
+ }
+ else
+ {
+ while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir, // dirs only
+ BackupStoreDirectory::Entry::Flags_Deleted)) != 0) // but not deleted ones
+ {
+ // Store the directory ID.
+ subDirs.push_back(en->GetObjectID());
+ }
+ }
+
+ // Done with the directory for now. Recurse to sub directories
+ for(std::vector<int64_t>::const_iterator i = subDirs.begin(); i != subDirs.end(); ++i)
+ {
+ DeleteDirectoryRecurse((*i), rBlocksDeletedOut, Undelete);
+ }
+ }
+
+ // Then, delete the files. Will need to load the directory again because it might have
+ // been removed from the cache.
+ {
+ // Get the directory...
+ BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
+
+ // Changes made?
+ bool changesMade = false;
+
+ // Run through files
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *en = 0;
+
+ while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
+ Undelete?(0):(BackupStoreDirectory::Entry::Flags_Deleted))) != 0) // Ignore deleted directories (or not deleted if Undelete)
+ {
+ // Add/remove the deleted flags
+ if(Undelete)
+ {
+ en->RemoveFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+ else
+ {
+ en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
+ }
+
+ // Keep count of the deleted blocks
+ if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
+ {
+ rBlocksDeletedOut += en->GetSizeInBlocks();
+ }
+
+ // Did something
+ changesMade = true;
+ }
+
+ // Save the directory
+ if(changesMade)
+ {
+ SaveDirectory(dir, ObjectID);
+ }
+ }
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(ObjectID);
+ throw;
+ }
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::ChangeDirAttributes(int64_t, const StreamableMemBlock &, int64_t)
+// Purpose: Change the attributes of a directory
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+void BackupContext::ChangeDirAttributes(int64_t Directory, const StreamableMemBlock &Attributes, int64_t AttributesModTime)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ try
+ {
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(Directory));
+
+ // Set attributes
+ dir.SetAttributes(Attributes, AttributesModTime);
+
+ // Save back
+ SaveDirectory(dir, Directory);
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(Directory);
+ throw;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::ChangeFileAttributes(int64_t, int64_t, const StreamableMemBlock &, int64_t)
+// Purpose: Sets the attributes on a directory entry. Returns true if the object existed, false if it didn't.
+// Created: 2003/09/06
+//
+// --------------------------------------------------------------------------
+bool BackupContext::ChangeFileAttributes(const BackupStoreFilename &rFilename, int64_t InDirectory, const StreamableMemBlock &Attributes, int64_t AttributesHash, int64_t &rObjectIDOut)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ try
+ {
+ // Get the directory we want to modify
+ BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
+
+ // Find the file entry
+ BackupStoreDirectory::Entry *en = 0;
+ // Iterate through current versions of files, only
+ BackupStoreDirectory::Iterator i(dir);
+ while((en = i.Next(
+ BackupStoreDirectory::Entry::Flags_File,
+ BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)
+ ) != 0)
+ {
+ if(en->GetName() == rFilename)
+ {
+ // Set attributes
+ en->SetAttributes(Attributes, AttributesHash);
+
+ // Tell caller the object ID
+ rObjectIDOut = en->GetObjectID();
+
+ // Done
+ break;
+ }
+ }
+ if(en == 0)
+ {
+ // Didn't find it
+ return false;
+ }
+
+ // Save back
+ SaveDirectory(dir, InDirectory);
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(InDirectory);
+ throw;
+ }
+
+ // Changed, everything OK
+ return true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::ObjectExists(int64_t)
+// Purpose: Test to see if an object of this ID exists in the store
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+bool BackupContext::ObjectExists(int64_t ObjectID, int MustBe)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ // Note that we need to allow object IDs a little bit greater than the last one in the store info,
+ // because the store info may not have got saved in an error condition. Max greater ID is
+ // STORE_INFO_SAVE_DELAY in this case, *2 to be safe.
+ if(ObjectID <= 0 || ObjectID > (mpStoreInfo->GetLastObjectIDUsed() + (STORE_INFO_SAVE_DELAY * 2)))
+ {
+ // Obviously bad object ID
+ return false;
+ }
+
+ // Test to see if it exists on the disc
+ std::string filename;
+ MakeObjectFilename(ObjectID, filename);
+ if(!RaidFileRead::FileExists(mStoreDiscSet, filename))
+ {
+ // RaidFile reports no file there
+ return false;
+ }
+
+ // Do we need to be more specific?
+ if(MustBe != ObjectExists_Anything)
+ {
+ // Open the file
+ std::auto_ptr<RaidFileRead> objectFile(RaidFileRead::Open(mStoreDiscSet, filename));
+
+ // Read the first integer
+ u_int32_t magic;
+ if(!objectFile->ReadFullBuffer(&magic, sizeof(magic), 0 /* not interested in how many read if failure */))
+ {
+ // Failed to get any bytes, must have failed
+ return false;
+ }
+
+#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
+ if(MustBe == ObjectExists_File && ntohl(magic) == OBJECTMAGIC_FILE_MAGIC_VALUE_V0)
+ {
+ // Old version detected
+ return true;
+ }
+#endif
+
+ // Right one?
+ u_int32_t requiredMagic = (MustBe == ObjectExists_File)?OBJECTMAGIC_FILE_MAGIC_VALUE_V1:OBJECTMAGIC_DIR_MAGIC_VALUE;
+
+ // Check
+ if(ntohl(magic) != requiredMagic)
+ {
+ return false;
+ }
+
+ // File is implicitly closed
+ }
+
+ return true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::OpenObject(int64_t)
+// Purpose: Opens an object
+// Created: 2003/09/03
+//
+// --------------------------------------------------------------------------
+std::auto_ptr<IOStream> BackupContext::OpenObject(int64_t ObjectID)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ // Attempt to open the file
+ std::string fn;
+ MakeObjectFilename(ObjectID, fn);
+ return std::auto_ptr<IOStream>(RaidFileRead::Open(mStoreDiscSet, fn).release());
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::GetClientStoreMarker()
+// Purpose: Retrieve the client store marker
+// Created: 2003/10/29
+//
+// --------------------------------------------------------------------------
+int64_t BackupContext::GetClientStoreMarker()
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ return mpStoreInfo->GetClientStoreMarker();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::GetStoreDiscUsageInfo(int64_t &, int64_t &, int64_t &)
+// Purpose: Get disc usage info from store info
+// Created: 1/1/04
+//
+// --------------------------------------------------------------------------
+void BackupContext::GetStoreDiscUsageInfo(int64_t &rBlocksUsed, int64_t &rBlocksSoftLimit, int64_t &rBlocksHardLimit)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ rBlocksUsed = mpStoreInfo->GetBlocksUsed();
+ rBlocksSoftLimit = mpStoreInfo->GetBlocksSoftLimit();
+ rBlocksHardLimit = mpStoreInfo->GetBlocksHardLimit();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::HardLimitExceeded()
+// Purpose: Returns true if the hard limit has been exceeded
+// Created: 1/1/04
+//
+// --------------------------------------------------------------------------
+bool BackupContext::HardLimitExceeded()
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ return mpStoreInfo->GetBlocksUsed() > mpStoreInfo->GetBlocksHardLimit();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::SetClientStoreMarker(int64_t)
+// Purpose: Sets the client store marker, and commits it to disc
+// Created: 2003/10/29
+//
+// --------------------------------------------------------------------------
+void BackupContext::SetClientStoreMarker(int64_t ClientStoreMarker)
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ mpStoreInfo->SetClientStoreMarker(ClientStoreMarker);
+ SaveStoreInfo(false /* don't delay saving this */);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::MoveObject(int64_t, int64_t, int64_t, const BackupStoreFilename &, bool)
+// Purpose: Move an object (and all objects with the same name) from one directory to another
+// Created: 12/11/03
+//
+// --------------------------------------------------------------------------
+void BackupContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory, int64_t MoveToDirectory, const BackupStoreFilename &rNewFilename, bool MoveAllWithSameName, bool AllowMoveOverDeletedObject)
+{
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
+ }
+
+ // Should deleted files be excluded when checking for the existance of objects with the target name?
+ int64_t targetSearchExcludeFlags = (AllowMoveOverDeletedObject)
+ ?(BackupStoreDirectory::Entry::Flags_Deleted)
+ :(BackupStoreDirectory::Entry::Flags_EXCLUDE_NOTHING);
+
+ // Special case if the directories are the same...
+ if(MoveFromDirectory == MoveToDirectory)
+ {
+ try
+ {
+ // Get the first directory
+ BackupStoreDirectory &dir(GetDirectoryInternal(MoveFromDirectory));
+
+ // Find the file entry
+ BackupStoreDirectory::Entry *en = dir.FindEntryByID(ObjectID);
+
+ // Error if not found
+ if(en == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
+ }
+
+ // Check the new name doens't already exist (optionally ignoring deleted files)
+ {
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING, targetSearchExcludeFlags)) != 0)
+ {
+ if(c->GetName() == rNewFilename)
+ {
+ THROW_EXCEPTION(BackupStoreException, NameAlreadyExistsInDirectory)
+ }
+ }
+ }
+
+ // Need to get all the entries with the same name?
+ if(MoveAllWithSameName)
+ {
+ // Iterate through the directory, copying all with matching names
+ BackupStoreDirectory::Iterator i(dir);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next()) != 0)
+ {
+ if(c->GetName() == en->GetName())
+ {
+ // Rename this one
+ c->SetName(rNewFilename);
+ }
+ }
+ }
+ else
+ {
+ // Just copy this one
+ en->SetName(rNewFilename);
+ }
+
+ // Save the directory back
+ SaveDirectory(dir, MoveFromDirectory);
+ }
+ catch(...)
+ {
+ RemoveDirectoryFromCache(MoveToDirectory); // either will do, as they're the same
+ throw;
+ }
+
+ return;
+ }
+
+ // Got to be careful how this is written, as we can't guarentte that if we have two
+ // directories open, the first won't be deleted as the second is opened. (cache)
+
+ // List of entries to move
+ std::vector<BackupStoreDirectory::Entry *> moving;
+
+ // list of directory IDs which need to have containing dir id changed
+ std::vector<int64_t> dirsToChangeContainingID;
+
+ try
+ {
+ // First of all, get copies of the entries to move to the to directory.
+
+ {
+ // Get the first directory
+ BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
+
+ // Find the file entry
+ BackupStoreDirectory::Entry *en = from.FindEntryByID(ObjectID);
+
+ // Error if not found
+ if(en == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
+ }
+
+ // Need to get all the entries with the same name?
+ if(MoveAllWithSameName)
+ {
+ // Iterate through the directory, copying all with matching names
+ BackupStoreDirectory::Iterator i(from);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next()) != 0)
+ {
+ if(c->GetName() == en->GetName())
+ {
+ // Copy
+ moving.push_back(new BackupStoreDirectory::Entry(*c));
+
+ // Check for containing directory correction
+ if(c->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(c->GetObjectID());
+ }
+ }
+ ASSERT(!moving.empty());
+ }
+ else
+ {
+ // Just copy this one
+ moving.push_back(new BackupStoreDirectory::Entry(*en));
+
+ // Check for containing directory correction
+ if(en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(en->GetObjectID());
+ }
+ }
+
+ // Secondly, insert them into the to directory, and save it
+
+ {
+ // To directory
+ BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
+
+ // Check the new name doens't already exist
+ {
+ BackupStoreDirectory::Iterator i(to);
+ BackupStoreDirectory::Entry *c = 0;
+ while((c = i.Next(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING, targetSearchExcludeFlags)) != 0)
+ {
+ if(c->GetName() == rNewFilename)
+ {
+ THROW_EXCEPTION(BackupStoreException, NameAlreadyExistsInDirectory)
+ }
+ }
+ }
+
+ // Copy the entries into it, changing the name as we go
+ for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
+ {
+ BackupStoreDirectory::Entry *en = (*i);
+ en->SetName(rNewFilename);
+ to.AddEntry(*en); // adds copy
+ }
+
+ // Save back
+ SaveDirectory(to, MoveToDirectory);
+ }
+
+ // Thirdly... remove them from the first directory -- but if it fails, attempt to delete them from the to directory
+ try
+ {
+ // Get directory
+ BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
+
+ // Delete each one
+ for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
+ {
+ from.DeleteEntry((*i)->GetObjectID());
+ }
+
+ // Save back
+ SaveDirectory(from, MoveFromDirectory);
+ }
+ catch(...)
+ {
+ // UNDO modification to To directory
+
+ // Get directory
+ BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
+
+ // Delete each one
+ for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
+ {
+ to.DeleteEntry((*i)->GetObjectID());
+ }
+
+ // Save back
+ SaveDirectory(to, MoveToDirectory);
+
+ // Throw the error
+ throw;
+ }
+
+ // Finally... for all the directories we moved, modify their containing directory ID
+ for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
+ {
+ // Load the directory
+ BackupStoreDirectory &change(GetDirectoryInternal(*i));
+
+ // Modify containing dir ID
+ change.SetContainerID(MoveToDirectory);
+
+ // Save it back
+ SaveDirectory(change, *i);
+ }
+ }
+ catch(...)
+ {
+ // Make sure directories aren't in the cache, as they may have been modified
+ RemoveDirectoryFromCache(MoveToDirectory);
+ RemoveDirectoryFromCache(MoveFromDirectory);
+ for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
+ {
+ RemoveDirectoryFromCache(*i);
+ }
+
+ while(!moving.empty())
+ {
+ delete moving.back();
+ moving.pop_back();
+ }
+ throw;
+ }
+
+ // Clean up
+ while(!moving.empty())
+ {
+ delete moving.back();
+ moving.pop_back();
+ }
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupContext::GetBackupStoreInfo()
+// Purpose: Return the backup store info object, exception if it isn't loaded
+// Created: 19/4/04
+//
+// --------------------------------------------------------------------------
+const BackupStoreInfo &BackupContext::GetBackupStoreInfo() const
+{
+ if(mpStoreInfo.get() == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
+ }
+
+ return *(mpStoreInfo.get());
+}
+
+
diff --git a/bin/bbstored/BackupContext.h b/bin/bbstored/BackupContext.h
new file mode 100755
index 00000000..18f2f25c
--- /dev/null
+++ b/bin/bbstored/BackupContext.h
@@ -0,0 +1,149 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupContext.h
+// Purpose: Context for backup store server
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCONTEXT__H
+#define BACKUPCONTEXT__H
+
+#include <string>
+#include <map>
+#include <memory>
+
+#include "NamedLock.h"
+#include "Utils.h"
+
+class BackupStoreDirectory;
+class BackupStoreFilename;
+class BackupStoreDaemon;
+class BackupStoreInfo;
+class IOStream;
+class StreamableMemBlock;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupContext
+// Purpose: Context for backup store server
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+class BackupContext
+{
+public:
+	BackupContext(int32_t ClientID, BackupStoreDaemon &rDaemon);
+	~BackupContext();
+private:
+	// Copying not allowed -- the context owns locks and a directory cache.
+	BackupContext(const BackupContext &rToCopy);
+public:
+
+	void ReceivedFinishCommand();
+	void CleanUp();
+
+	// NOTE(review): duplicate of the const overload declared below under
+	// "Info" -- presumably kept so non-const callers bind here; verify.
+	int32_t GetClientID() {return mClientID;}
+
+	// Protocol phases. Phase_START deliberately aliases Phase_Version:
+	// the version exchange is the first thing that happens.
+	enum
+	{
+		Phase_START = 0,
+		Phase_Version = 0,
+		Phase_Login = 1,
+		Phase_Commands = 2
+	};
+
+	int GetPhase() const {return mProtocolPhase;}
+	void SetPhase(int NewPhase) {mProtocolPhase = NewPhase;}
+
+	// Read only locking
+	bool SessionIsReadOnly() {return mReadOnly;}
+	bool AttemptToGetWriteLock();
+
+	void SetClientHasAccount(const std::string &rStoreRoot, int StoreDiscSet) {mClientHasAccount = true; mStoreRoot = rStoreRoot; mStoreDiscSet = StoreDiscSet;}
+	bool GetClientHasAccount() const {return mClientHasAccount;}
+	const std::string &GetStoreRoot() const {return mStoreRoot;}
+	int GetStoreDiscSet() const {return mStoreDiscSet;}
+
+	// Store info
+	void LoadStoreInfo();
+	void SaveStoreInfo(bool AllowDelay = true);
+	const BackupStoreInfo &GetBackupStoreInfo() const;
+
+	// Client marker
+	int64_t GetClientStoreMarker();
+	void SetClientStoreMarker(int64_t ClientStoreMarker);
+
+	// Usage information
+	void GetStoreDiscUsageInfo(int64_t &rBlocksUsed, int64_t &rBlocksSoftLimit, int64_t &rBlocksHardLimit);
+	bool HardLimitExceeded();
+
+	// Reading directories
+	// --------------------------------------------------------------------------
+	//
+	// Function
+	//		Name: BackupContext::GetDirectory(int64_t)
+	//		Purpose: Return a reference to a directory. Valid only until the
+	//				 next time a function which affects directories is called.
+	//				 Mainly this function, and creation of files.
+	//		Created: 2003/09/02
+	//
+	// --------------------------------------------------------------------------
+	const BackupStoreDirectory &GetDirectory(int64_t ObjectID)
+	{
+		// External callers aren't allowed to change it -- this function
+		// merely turns the returned directory const.
+		return GetDirectoryInternal(ObjectID);
+	}
+
+	// Manipulating files/directories
+	int64_t AddFile(IOStream &rFile, int64_t InDirectory, int64_t ModificationTime, int64_t AttributesHash, int64_t DiffFromFileID, const BackupStoreFilename &rFilename, bool MarkFileWithSameNameAsOldVersions);
+	int64_t AddDirectory(int64_t InDirectory, const BackupStoreFilename &rFilename, const StreamableMemBlock &Attributes, int64_t AttributesModTime, bool &rAlreadyExists);
+	void ChangeDirAttributes(int64_t Directory, const StreamableMemBlock &Attributes, int64_t AttributesModTime);
+	bool ChangeFileAttributes(const BackupStoreFilename &rFilename, int64_t InDirectory, const StreamableMemBlock &Attributes, int64_t AttributesHash, int64_t &rObjectIDOut);
+	bool DeleteFile(const BackupStoreFilename &rFilename, int64_t InDirectory, int64_t &rObjectIDOut);
+	void DeleteDirectory(int64_t ObjectID, bool Undelete = false);
+	void MoveObject(int64_t ObjectID, int64_t MoveFromDirectory, int64_t MoveToDirectory, const BackupStoreFilename &rNewFilename, bool MoveAllWithSameName, bool AllowMoveOverDeletedObject);
+
+	// Manipulating objects
+	enum
+	{
+		ObjectExists_Anything = 0,
+		ObjectExists_File = 1,
+		ObjectExists_Directory = 2
+	};
+	bool ObjectExists(int64_t ObjectID, int MustBe = ObjectExists_Anything);
+	std::auto_ptr<IOStream> OpenObject(int64_t ObjectID);
+
+	// Info
+	int32_t GetClientID() const {return mClientID;}
+
+private:
+	void MakeObjectFilename(int64_t ObjectID, std::string &rOutput, bool EnsureDirectoryExists = false);
+	BackupStoreDirectory &GetDirectoryInternal(int64_t ObjectID);
+	void SaveDirectory(BackupStoreDirectory &rDir, int64_t ObjectID);
+	void RemoveDirectoryFromCache(int64_t ObjectID);
+	void DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBlocksDeletedOut, bool Undelete);
+	int64_t AllocateObjectID();
+
+private:
+	int32_t mClientID;
+	BackupStoreDaemon &mrDaemon;
+	int mProtocolPhase;
+	bool mClientHasAccount;
+	std::string mStoreRoot;	// has final directory separator
+	int mStoreDiscSet;
+	bool mReadOnly;
+	NamedLock mWriteLock;
+	int mSaveStoreInfoDelay; // how many times to delay saving the store info
+
+	// Store info
+	std::auto_ptr<BackupStoreInfo> mpStoreInfo;
+
+	// Directory cache -- owned pointers, keyed by object ID. Invalidated
+	// by RemoveDirectoryFromCache().
+	std::map<int64_t, BackupStoreDirectory*> mDirectoryCache;
+};
+
+#endif // BACKUPCONTEXT__H
+
diff --git a/bin/bbstored/BackupStoreDaemon.cpp b/bin/bbstored/BackupStoreDaemon.cpp
new file mode 100755
index 00000000..0afdaa5d
--- /dev/null
+++ b/bin/bbstored/BackupStoreDaemon.cpp
@@ -0,0 +1,284 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreDaemon.cpp
+// Purpose: Backup store daemon
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <syslog.h>
+#include <signal.h>
+
+#include "BackupContext.h"
+#include "BackupStoreDaemon.h"
+#include "BackupStoreConfigVerify.h"
+#include "autogen_BackupProtocolServer.h"
+#include "RaidFileController.h"
+#include "BackupStoreAccountDatabase.h"
+#include "BackupStoreAccounts.h"
+#include "BannerText.h"
+
+#include "MemLeakFindOn.h"
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::BackupStoreDaemon()
+// Purpose: Constructor
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+BackupStoreDaemon::BackupStoreDaemon()
+	: mpAccountDatabase(0),
+	  mpAccounts(0),
+	  mExtendedLogging(false),
+	  mHaveForkedHousekeeping(false),
+	  mIsHousekeepingProcess(false),
+	  // mInterProcessComms wraps the socket member; the socket itself is
+	  // only attached later, in Run(), after the housekeeping fork.
+	  mInterProcessComms(mInterProcessCommsSocket)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::~BackupStoreDaemon()
+// Purpose: Destructor
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+BackupStoreDaemon::~BackupStoreDaemon()
+{
+	// Must delete this one before the database ...
+	if(mpAccounts != 0)
+	{
+		delete mpAccounts;
+		mpAccounts = 0;
+	}
+	// ... as the mpAccounts object has a reference to it
+	if(mpAccountDatabase != 0)
+	{
+		delete mpAccountDatabase;
+		mpAccountDatabase = 0;
+	}
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::DaemonName()
+// Purpose: Name of daemon
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+const char *BackupStoreDaemon::DaemonName() const
+{
+	// Used by the daemon framework for syslog identity, pid files, etc.
+	return "bbstored";
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::DaemonBanner()
+// Purpose: Daemon banner
+// Created: 1/1/04
+//
+// --------------------------------------------------------------------------
+const char *BackupStoreDaemon::DaemonBanner() const
+{
+#ifndef NDEBUG
+	// Don't display banner in debug builds (returning 0 suppresses it)
+	return 0;
+#else
+	return BANNER_TEXT("Backup Store Server");
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::GetConfigVerify()
+// Purpose: Configuration definition
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+const ConfigurationVerify *BackupStoreDaemon::GetConfigVerify() const
+{
+	// Schema for the bbstored configuration file, defined in
+	// BackupStoreConfigVerify.
+	return &BackupConfigFileVerify;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::SetupInInitialProcess()
+// Purpose: Setup before we fork -- get raid file controller going
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+void BackupStoreDaemon::SetupInInitialProcess()
+{
+	const Configuration &config(GetConfiguration());
+	
+	// Initialise the raid files controller
+	RaidFileController &rcontroller = RaidFileController::GetController();
+	rcontroller.Initialise(config.GetKeyValue("RaidFileConf").c_str());
+	
+	// Load the account database -- held in an auto_ptr until ownership
+	// is safely transferred to the member pointer.
+	std::auto_ptr<BackupStoreAccountDatabase> pdb(BackupStoreAccountDatabase::Read(config.GetKeyValue("AccountDatabase").c_str()));
+	mpAccountDatabase = pdb.release();
+	
+	// Create an accounts object (keeps a reference to the database,
+	// hence the destruction order enforced in the destructor)
+	mpAccounts = new BackupStoreAccounts(*mpAccountDatabase);
+	
+	// Ready to go!
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::Run()
+// Purpose: Run shim for the store daemon -- read some config details
+// Created: 2003/10/24
+//
+// --------------------------------------------------------------------------
+void BackupStoreDaemon::Run()
+{
+	// Get extended logging flag
+	mExtendedLogging = false;
+	const Configuration &config(GetConfiguration());
+	mExtendedLogging = config.GetKeyValueBool("ExtendedLogging");
+	
+	// Fork off housekeeping daemon -- must only do this the first time Run() is called
+	// (Run() is re-entered on SIGHUP; mHaveForkedHousekeeping guards against a second fork)
+	if(!mHaveForkedHousekeeping)
+	{
+		// Open a socket pair for communication between server and housekeeping
+		int sv[2] = {-1,-1};
+		if(::socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, sv) != 0)
+		{
+			THROW_EXCEPTION(ServerException, SocketPairFailed)
+		}
+		int whichSocket = 0;
+		
+		// Fork
+		switch(::fork())
+		{
+		case -1:
+			{
+				// Error
+				THROW_EXCEPTION(ServerException, ServerForkError)
+			}
+			break;
+		case 0:
+			{
+				// In child process -- becomes the housekeeping process
+				mIsHousekeepingProcess = true;
+				SetProcessTitle("housekeeping, idle");
+				whichSocket = 1;
+				// Change the log name
+				::openlog("bbstored/hk", LOG_PID, LOG_LOCAL6);
+				// Log that housekeeping started
+				::syslog(LOG_INFO, "Housekeeping process started");
+				// Ignore term and hup
+				// Parent will handle these and alert the child via the socket, don't want to randomly die
+				::signal(SIGHUP, SIG_IGN);
+				::signal(SIGTERM, SIG_IGN);
+			}
+			break;
+		default:
+			{
+				// Parent process
+				whichSocket = 0;
+			}
+			break;
+		}
+		
+		// Mark that this has been done, so -HUP doesn't try and do this again
+		mHaveForkedHousekeeping = true;
+		
+		// Attach the comms thing to the right socket, and close the other one
+		mInterProcessCommsSocket.Attach(sv[whichSocket]);
+		
+		if(::close(sv[(whichSocket == 0)?1:0]) != 0)
+		{
+			THROW_EXCEPTION(ServerException, SocketCloseError)
+		}
+	}
+
+	if(mIsHousekeepingProcess)
+	{
+		// Housekeeping process -- do other stuff
+		HousekeepingProcess();
+	}
+	else
+	{
+		// In server process -- use the base class to do the magic
+		ServerTLS<BOX_PORT_BBSTORED>::Run();
+		
+		// Why did it stop? Tell the housekeeping process to do the same
+		// ("h\n" = reload/hup, "t\n" = terminate; read by CheckForInterProcessMsg)
+		if(IsReloadConfigWanted())
+		{
+			mInterProcessCommsSocket.Write("h\n", 2);
+		}
+		if(IsTerminateWanted())
+		{
+			mInterProcessCommsSocket.Write("t\n", 2);
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupStoreDaemon::Connection(SocketStreamTLS &)
+// Purpose: Handles a connection
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+void BackupStoreDaemon::Connection(SocketStreamTLS &rStream)
+{
+	// Get the common name from the certificate
+	std::string clientCommonName(rStream.GetPeerCommonName());
+	
+	// Log the name
+	::syslog(LOG_INFO, "Certificate CN: %s\n", clientCommonName.c_str());
+	
+	// Check it -- the account ID is encoded in the certificate CN as
+	// "BACKUP-<hex id>", so the certificate authorises the account.
+	int32_t id;
+	if(::sscanf(clientCommonName.c_str(), "BACKUP-%x", &id) != 1)
+	{
+		// Bad! Disconnect immediately
+		return;
+	}
+
+	// Make ps listings clearer
+	SetProcessTitle("client %08x", id);
+
+	// Create a context, using this ID
+	BackupContext context(id, *this);
+	
+	// See if the client has an account?
+	// (if not, the context is left without an account and the protocol
+	// will refuse the login)
+	if(mpAccounts && mpAccounts->AccountExists(id))
+	{
+		std::string root;
+		int discSet;
+		mpAccounts->GetAccountRoot(id, root, discSet);
+		context.SetClientHasAccount(root, discSet);
+	}
+
+	// Handle a connection with the backup protocol
+	BackupProtocolServer server(rStream);
+	server.SetLogToSysLog(mExtendedLogging);
+	server.SetTimeout(BACKUP_STORE_TIMEOUT);
+	server.DoServer(context);
+	context.CleanUp();
+}
+
diff --git a/bin/bbstored/BackupStoreDaemon.h b/bin/bbstored/BackupStoreDaemon.h
new file mode 100755
index 00000000..6c4a45bc
--- /dev/null
+++ b/bin/bbstored/BackupStoreDaemon.h
@@ -0,0 +1,77 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupStoreDaemon.h
+// Purpose: Backup store daemon
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPSTOREDAEMON__H
+#define BACKUPSTOREDAEMON__H
+
+#include "ServerTLS.h"
+#include "BoxPortsAndFiles.h"
+#include "BackupConstants.h"
+#include "IOStreamGetLine.h"
+
+class BackupStoreAccounts;
+class BackupStoreAccountDatabase;
+class HousekeepStoreAccount;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupStoreDaemon
+// Purpose: Backup store daemon implementation
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+class BackupStoreDaemon : public ServerTLS<BOX_PORT_BBSTORED>
+{
+	friend class HousekeepStoreAccount;
+
+public:
+	BackupStoreDaemon();
+	~BackupStoreDaemon();
+private:
+	// Copying not allowed -- owns account database and sockets.
+	BackupStoreDaemon(const BackupStoreDaemon &rToCopy);
+public:
+
+	// For BackupContext to communicate with housekeeping process
+	void SendMessageToHousekeepingProcess(const void *Msg, int MsgLen)
+	{
+		mInterProcessCommsSocket.Write(Msg, MsgLen);
+	}
+
+protected:
+	
+	virtual void SetupInInitialProcess();
+	
+	virtual void Run();
+	
+	void Connection(SocketStreamTLS &rStream);
+
+	virtual const char *DaemonName() const;
+	virtual const char *DaemonBanner() const;
+
+	const ConfigurationVerify *GetConfigVerify() const;
+	
+	// Housekeeping functions
+	void HousekeepingProcess();
+	bool CheckForInterProcessMsg(int AccountNum = 0, int MaximumWaitTime = 0);
+
+private:
+	BackupStoreAccountDatabase *mpAccountDatabase;
+	BackupStoreAccounts *mpAccounts;
+	bool mExtendedLogging;
+	bool mHaveForkedHousekeeping;
+	bool mIsHousekeepingProcess;
+	
+	// One end of the socketpair created in Run(); which end depends on
+	// whether this is the server or housekeeping process.
+	SocketStream mInterProcessCommsSocket;
+	IOStreamGetLine mInterProcessComms;
+};
+
+
+#endif // BACKUPSTOREDAEMON__H
+
diff --git a/bin/bbstored/HousekeepStoreAccount.cpp b/bin/bbstored/HousekeepStoreAccount.cpp
new file mode 100755
index 00000000..4aa1999e
--- /dev/null
+++ b/bin/bbstored/HousekeepStoreAccount.cpp
@@ -0,0 +1,844 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: HousekeepStoreAccount.cpp
+// Purpose:
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <map>
+#include <stdio.h>
+
+#include "HousekeepStoreAccount.h"
+#include "BackupStoreDaemon.h"
+#include "StoreStructure.h"
+#include "BackupStoreConstants.h"
+#include "RaidFileRead.h"
+#include "RaidFileWrite.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreInfo.h"
+#include "NamedLock.h"
+#include "autogen_BackupStoreException.h"
+#include "BackupStoreFile.h"
+
+#include "MemLeakFindOn.h"
+
+// check every 32 directories scanned/files deleted
+#define POLL_INTERPROCESS_MSG_CHECK_FREQUENCY 32
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::HousekeepStoreAccount(int, const std::string &, int, BackupStoreDaemon &)
+// Purpose: Constructor
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+HousekeepStoreAccount::HousekeepStoreAccount(int AccountID, const std::string &rStoreRoot, int StoreDiscSet, BackupStoreDaemon &rDaemon)
+	: mAccountID(AccountID),
+	  mStoreRoot(rStoreRoot),
+	  mStoreDiscSet(StoreDiscSet),
+	  mrDaemon(rDaemon),
+	  mDeletionSizeTarget(0),
+	  mPotentialDeletionsTotalSize(0),
+	  mMaxSizeInPotentialDeletions(0),
+	  // recalculated usage figures, accumulated during ScanDirectory()
+	  mBlocksUsed(0),
+	  mBlocksInOldFiles(0),
+	  mBlocksInDeletedFiles(0),
+	  mBlocksInDirectories(0),
+	  // deltas applied to the store info at the end of housekeeping
+	  mBlocksUsedDelta(0),
+	  mBlocksInOldFilesDelta(0),
+	  mBlocksInDeletedFilesDelta(0),
+	  mBlocksInDirectoriesDelta(0),
+	  mFilesDeleted(0),
+	  mEmptyDirectoriesDeleted(0),
+	  mCountUntilNextInterprocessMsgCheck(POLL_INTERPROCESS_MSG_CHECK_FREQUENCY)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::~HousekeepStoreAccount()
+// Purpose: Destructor
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+HousekeepStoreAccount::~HousekeepStoreAccount()
+{
+	// Nothing to do -- all members clean up themselves.
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::DoHousekeeping()
+// Purpose: Perform the housekeeping
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+void HousekeepStoreAccount::DoHousekeeping()
+{
+	// Attempt to lock the account
+	std::string writeLockFilename;
+	StoreStructure::MakeWriteLockFilename(mStoreRoot, mStoreDiscSet, writeLockFilename);
+	NamedLock writeLock;
+	if(!writeLock.TryAndGetLock(writeLockFilename.c_str(), 0600 /* restrictive file permissions */))
+	{
+		// Couldn't lock the account -- just stop now
+		// (a client connection probably holds the write lock)
+		return;
+	}
+
+	// Load the store info to find necessary info for the housekeeping
+	std::auto_ptr<BackupStoreInfo> info(BackupStoreInfo::Load(mAccountID, mStoreRoot, mStoreDiscSet, false /* Read/Write */));
+
+	// Calculate how much should be deleted
+	mDeletionSizeTarget = info->GetBlocksUsed() - info->GetBlocksSoftLimit();
+	if(mDeletionSizeTarget < 0)
+	{
+		mDeletionSizeTarget = 0;
+	}
+
+	// Scan the directory for potential things to delete
+	// This will also remove eligible items marked with RemoveASAP
+	bool continueHousekeeping = ScanDirectory(BACKUPSTORE_ROOT_DIRECTORY_ID);
+
+	// If scan directory stopped for some reason, probably parent instructed to terminate, stop now.
+	if(!continueHousekeeping)
+	{
+		// If any files were marked "delete now", then update the size of the store.
+		if(mBlocksUsedDelta != 0 || mBlocksInOldFilesDelta != 0 || mBlocksInDeletedFilesDelta != 0)
+		{
+			info->ChangeBlocksUsed(mBlocksUsedDelta);
+			info->ChangeBlocksInOldFiles(mBlocksInOldFilesDelta);
+			info->ChangeBlocksInDeletedFiles(mBlocksInDeletedFilesDelta);
+			
+			// Save the store info back
+			info->Save();
+		}
+	
+		return;
+	}
+
+	// Log any difference in opinion between the values recorded in the store info, and
+	// the values just calculated for space usage.
+	// BLOCK
+	{
+		int64_t used = info->GetBlocksUsed();
+		int64_t usedOld = info->GetBlocksInOldFiles();
+		int64_t usedDeleted = info->GetBlocksInDeletedFiles();
+		int64_t usedDirectories = info->GetBlocksInDirectories();
+
+		// If the counts were wrong, taking into account RemoveASAP items deleted, log a message
+		if((used + mBlocksUsedDelta) != mBlocksUsed || (usedOld + mBlocksInOldFilesDelta) != mBlocksInOldFiles
+			|| (usedDeleted + mBlocksInDeletedFilesDelta) != mBlocksInDeletedFiles || usedDirectories != mBlocksInDirectories)
+		{
+			// Log this
+			::syslog(LOG_ERR, "On housekeeping, sizes in store do not match calculated sizes, correcting");
+			::syslog(LOG_ERR, "different (store,calc): acc 0x%08x, used (%lld,%lld), old (%lld,%lld), deleted (%lld,%lld), dirs (%lld,%lld)",
+				mAccountID,
+				(used + mBlocksUsedDelta), mBlocksUsed, (usedOld + mBlocksInOldFilesDelta), mBlocksInOldFiles,
+				(usedDeleted + mBlocksInDeletedFilesDelta), mBlocksInDeletedFiles, usedDirectories, mBlocksInDirectories);
+		}
+		
+		// If the current values don't match, store them
+		if(used != mBlocksUsed || usedOld != mBlocksInOldFiles
+			|| usedDeleted != mBlocksInDeletedFiles || usedDirectories != (mBlocksInDirectories + mBlocksInDirectoriesDelta))
+		{	
+			// Set corrected values in store info
+			info->CorrectAllUsedValues(mBlocksUsed, mBlocksInOldFiles, mBlocksInDeletedFiles, mBlocksInDirectories + mBlocksInDirectoriesDelta);
+			info->Save();
+		}
+	}
+	
+	// Reset the delta counts for files, as they will include RemoveASAP flagged files deleted
+	// during the initial scan.
+	int64_t removeASAPBlocksUsedDelta = mBlocksUsedDelta;	// keep for reporting
+	mBlocksUsedDelta = 0;
+	mBlocksInOldFilesDelta = 0;
+	mBlocksInDeletedFilesDelta = 0;
+	
+	// Go and delete items from the accounts
+	bool deleteInterrupted = DeleteFiles();
+	
+	// If that wasn't interrupted, remove any empty directories which are also marked as deleted in their containing directory
+	if(!deleteInterrupted)
+	{
+		deleteInterrupted = DeleteEmptyDirectories();
+	}
+	
+	// Log deletion if anything was deleted
+	if(mFilesDeleted > 0 || mEmptyDirectoriesDeleted > 0)
+	{
+		::syslog(LOG_INFO, "Account 0x%08x, removed %lld blocks (%lld files, %lld dirs)%s", mAccountID, 0 - (mBlocksUsedDelta + removeASAPBlocksUsedDelta),
+			mFilesDeleted, mEmptyDirectoriesDeleted,
+			deleteInterrupted?" was interrupted":"");
+	}
+	
+	// Make sure the delta's won't cause problems if the counts are really wrong, and
+	// it wasn't fixed because the store was updated during the scan.
+	// (clamps each delta so the stored count can never go negative)
+	if(mBlocksUsedDelta < (0 - info->GetBlocksUsed())) mBlocksUsedDelta = (0 - info->GetBlocksUsed());
+	if(mBlocksInOldFilesDelta < (0 - info->GetBlocksInOldFiles())) mBlocksInOldFilesDelta = (0 - info->GetBlocksInOldFiles());
+	if(mBlocksInDeletedFilesDelta < (0 - info->GetBlocksInDeletedFiles())) mBlocksInDeletedFilesDelta =(0 - info->GetBlocksInDeletedFiles());
+	if(mBlocksInDirectoriesDelta < (0 - info->GetBlocksInDirectories())) mBlocksInDirectoriesDelta = (0 - info->GetBlocksInDirectories());
+	
+	// Update the usage counts in the store
+	info->ChangeBlocksUsed(mBlocksUsedDelta);
+	info->ChangeBlocksInOldFiles(mBlocksInOldFilesDelta);
+	info->ChangeBlocksInDeletedFiles(mBlocksInDeletedFilesDelta);
+	info->ChangeBlocksInDirectories(mBlocksInDirectoriesDelta);
+	
+	// Save the store info back
+	info->Save();
+	
+	// Explicitly release the lock (would happen automatically on going out of scope, included for code clarity)
+	writeLock.ReleaseLock();
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::MakeObjectFilename(int64_t, std::string &)
+// Purpose: Generate and place the filename for a given object ID
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+void HousekeepStoreAccount::MakeObjectFilename(int64_t ObjectID, std::string &rFilenameOut)
+{
+	// Delegate to utility function
+	StoreStructure::MakeObjectFilename(ObjectID, mStoreRoot, mStoreDiscSet, rFilenameOut, false /* don't bother ensuring the directory exists */);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::ScanDirectory(int64_t)
+// Purpose: Private. Scan a directory for potenitally deleteable items, and
+// add them to the list. Returns true if the scan should continue.
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID)
+{
+	if((--mCountUntilNextInterprocessMsgCheck) <= 0)
+	{
+		mCountUntilNextInterprocessMsgCheck = POLL_INTERPROCESS_MSG_CHECK_FREQUENCY;
+		// Check for having to stop
+		if(mrDaemon.CheckForInterProcessMsg(mAccountID))	// include account ID here as the specified account is locked
+		{
+			// Need to abort now
+			return false;
+		}
+	}
+
+	// Get the filename
+	std::string objectFilename;
+	MakeObjectFilename(ObjectID, objectFilename);
+
+	// Open it.
+	std::auto_ptr<RaidFileRead> dirStream(RaidFileRead::Open(mStoreDiscSet, objectFilename));
+	
+	// Add the size of the directory on disc to the size being calculated
+	int64_t originalDirSizeInBlocks = dirStream->GetDiscUsageInBlocks();
+	mBlocksInDirectories += originalDirSizeInBlocks;
+	mBlocksUsed += originalDirSizeInBlocks;
+	
+	// Read the directory in
+	BackupStoreDirectory dir;
+	dir.ReadFromStream(*dirStream, IOStream::TimeOutInfinite);
+	
+	// Is it empty?
+	if(dir.GetNumberOfEntries() == 0)
+	{
+		// Add it to the list of directories to potentially delete
+		mEmptyDirectories.push_back(dir.GetObjectID());
+	}
+	
+	// BLOCK
+	{
+		// Remove any files which are marked for removal as soon as they become old
+		// or deleted.
+		bool deletedSomething = false;
+		do
+		{
+			// Iterate through the directory
+			deletedSomething = false;
+			BackupStoreDirectory::Iterator i(dir);
+			BackupStoreDirectory::Entry *en = 0;
+			while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
+			{
+				int16_t enFlags = en->GetFlags();
+				if((enFlags & BackupStoreDirectory::Entry::Flags_RemoveASAP) != 0
+					&& (enFlags & (BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)) != 0)
+				{
+					// Delete this immediately.
+					DeleteFile(ObjectID, en->GetObjectID(), dir, objectFilename, originalDirSizeInBlocks);
+					
+					// flag as having done something
+					deletedSomething = true;
+					
+					// Must start the loop from the beginning again, as iterator is now
+					// probably invalid.
+					break;
+				}
+			}
+		} while(deletedSomething);
+	}
+	
+	// BLOCK
+	{
+		// Add files to the list of potential deletions
+
+		// map to count the distance from the mark
+		std::map<std::pair<BackupStoreFilename, int32_t>, int32_t> markVersionAges;
+			// map of pair (filename, mark number) -> version age
+
+		// NOTE: use a reverse iterator to allow the distance from mark stuff to work
+		BackupStoreDirectory::ReverseIterator i(dir);
+		BackupStoreDirectory::Entry *en = 0;
+
+		while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0)
+		{
+			// Update recalculated usage sizes
+			int16_t enFlags = en->GetFlags();
+			int64_t enSizeInBlocks = en->GetSizeInBlocks();
+			mBlocksUsed += enSizeInBlocks;
+			if(enFlags & BackupStoreDirectory::Entry::Flags_OldVersion) mBlocksInOldFiles += enSizeInBlocks;
+			if(enFlags & BackupStoreDirectory::Entry::Flags_Deleted) mBlocksInDeletedFiles += enSizeInBlocks;
+
+			// Work out ages of this version from the last mark
+			int32_t enVersionAge = 0;
+			std::map<std::pair<BackupStoreFilename, int32_t>, int32_t>::iterator enVersionAgeI(markVersionAges.find(std::pair<BackupStoreFilename, int32_t>(en->GetName(), en->GetMarkNumber())));
+			if(enVersionAgeI != markVersionAges.end())
+			{
+				enVersionAge = enVersionAgeI->second + 1;
+				enVersionAgeI->second = enVersionAge;
+			}
+			else
+			{
+				markVersionAges[std::pair<BackupStoreFilename, int32_t>(en->GetName(), en->GetMarkNumber())] = enVersionAge;
+			}
+			// enVersionAge is now the age of this version.
+
+			// Potentially add it to the list if it's an old version or deleted
+			if((enFlags & (BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)) != 0)
+			{
+				// Is deleted / old version.
+				DelEn d;
+				d.mObjectID = en->GetObjectID();
+				d.mInDirectory = ObjectID;
+				d.mSizeInBlocks = en->GetSizeInBlocks();
+				d.mMarkNumber = en->GetMarkNumber();
+				d.mVersionAgeWithinMark = enVersionAge;
+				
+				// Add it to the list
+				mPotentialDeletions.insert(d);
+				
+				// Update various counts
+				mPotentialDeletionsTotalSize += d.mSizeInBlocks;
+				if(d.mSizeInBlocks > mMaxSizeInPotentialDeletions) mMaxSizeInPotentialDeletions = d.mSizeInBlocks;
+				
+				// Too much in the list of potential deletions?
+				// (check against the deletion target + the max size in deletions, so that we never delete things
+				// and take the total size below the deletion size target)
+				if(mPotentialDeletionsTotalSize > (mDeletionSizeTarget + mMaxSizeInPotentialDeletions))
+				{
+					int64_t sizeToRemove = mPotentialDeletionsTotalSize - (mDeletionSizeTarget + mMaxSizeInPotentialDeletions);
+					bool recalcMaxSize = false;
+					
+					while(sizeToRemove > 0)
+					{
+						// Make iterator for the last element, while checking that there's something there in the first place.
+						std::set<DelEn, DelEnCompare>::iterator i(mPotentialDeletions.end());
+						// FIX: was (i != mPotentialDeletions.begin()), which broke out
+						// whenever the set was NON-empty -- so the trim loop never trimmed,
+						// and an empty set would have fallen through to --i (undefined
+						// behaviour). end() == begin() is the empty-set condition.
+						if(i == mPotentialDeletions.begin())
+						{
+							// Nothing left in set
+							break;
+						}
+						// Make this into an iterator pointing to the last element in the set
+						--i;
+						
+						// Delete this one?
+						if(sizeToRemove > i->mSizeInBlocks)
+						{
+							sizeToRemove -= i->mSizeInBlocks;
+							if(i->mSizeInBlocks >= mMaxSizeInPotentialDeletions)
+							{
+								// Will need to recalculate the maximum size now, because we've just deleted that element
+								recalcMaxSize = true;
+							}
+							mPotentialDeletions.erase(i);
+						}
+						else
+						{
+							// Over the size to remove, so stop now
+							break;
+						}
+					}
+					
+					if(recalcMaxSize)
+					{
+						// Because an object which was the maximum size recorded was deleted from the set
+						// it's necessary to recalculate this maximum.
+						mMaxSizeInPotentialDeletions = 0;
+						std::set<DelEn, DelEnCompare>::const_iterator i(mPotentialDeletions.begin());
+						for(; i != mPotentialDeletions.end(); ++i)
+						{
+							if(i->mSizeInBlocks > mMaxSizeInPotentialDeletions)
+							{
+								mMaxSizeInPotentialDeletions = i->mSizeInBlocks;
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	{
+		// Recurse into subdirectories
+		BackupStoreDirectory::Iterator i(dir);
+		BackupStoreDirectory::Entry *en = 0;
+		while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir)) != 0)
+		{
+			// Next level
+			ASSERT((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == BackupStoreDirectory::Entry::Flags_Dir);
+			
+			if(!ScanDirectory(en->GetObjectID()))
+			{
+				// Halting operation
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::DelEnCompare::operator()(const HousekeepStoreAccount::DelEn &, const HousekeepStoreAccount::DelEnd &)
+// Purpose: Comparison function for set
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::DelEnCompare::operator()(const HousekeepStoreAccount::DelEn &x, const HousekeepStoreAccount::DelEn &y)
+{
+	// STL spec says this:
+	// A Strict Weak Ordering is a Binary Predicate that compares two objects, returning true if the first precedes the second.
+
+	// The sort order here is intended to preserve the entries of most value, that is, the newest objects
+	// which are on a mark boundary. Elements which sort first are deleted first.
+
+	// Reverse order age, so oldest goes first
+	if(x.mVersionAgeWithinMark > y.mVersionAgeWithinMark)
+	{
+		return true;
+	}
+	else if(x.mVersionAgeWithinMark < y.mVersionAgeWithinMark)
+	{
+		return false;
+	}
+
+	// but mark number in ascending order, so that the oldest marks are deleted first
+	if(x.mMarkNumber < y.mMarkNumber)
+	{
+		return true;
+	}
+	else if(x.mMarkNumber > y.mMarkNumber)
+	{
+		return false;
+	}
+
+	// Just compare object ID now to put the oldest objects first
+	// (object IDs are allocated sequentially, so lower ID == older;
+	// this also makes the ordering a total order, as set requires)
+	return x.mObjectID < y.mObjectID;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: HousekeepStoreAccount::DeleteFiles()
+// Purpose: Delete the files targetted for deletion, returning true if the operation was interrupted
+// Created: 15/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::DeleteFiles()
+{
+	// Only delete files if the deletion target is greater than zero
+	// (otherwise we delete one file each time round, which gradually deletes the old versions)
+	if(mDeletionSizeTarget <= 0)
+	{
+		// Not interrupted
+		return false;
+	}
+
+	// Iterate through the set of potential deletions, until enough has been deleted.
+	// (there is likely to be more in the set than should be actually deleted).
+	for(std::set<DelEn, DelEnCompare>::iterator i(mPotentialDeletions.begin()); i != mPotentialDeletions.end(); ++i)
+	{
+		if((--mCountUntilNextInterprocessMsgCheck) <= 0)
+		{
+			mCountUntilNextInterprocessMsgCheck = POLL_INTERPROCESS_MSG_CHECK_FREQUENCY;
+			// Check for having to stop
+			if(mrDaemon.CheckForInterProcessMsg(mAccountID))	// include account ID here as the specified account is now locked
+			{
+				// Need to abort now -- return true to flag the interruption
+				return true;
+			}
+		}
+
+		// Load up the directory it's in
+		// Get the filename
+		std::string dirFilename;
+		BackupStoreDirectory dir;
+		int64_t dirSizeInBlocksOrig = 0;
+		{
+			MakeObjectFilename(i->mInDirectory, dirFilename);
+			std::auto_ptr<RaidFileRead> dirStream(RaidFileRead::Open(mStoreDiscSet, dirFilename));
+			dirSizeInBlocksOrig = dirStream->GetDiscUsageInBlocks();
+			dir.ReadFromStream(*dirStream, IOStream::TimeOutInfinite);
+		}
+
+		// Delete the file
+		// (updates mBlocksUsedDelta and the other deltas as a side effect)
+		DeleteFile(i->mInDirectory, i->mObjectID, dir, dirFilename, dirSizeInBlocksOrig);
+
+		// Stop if the deletion target has been matched or exceeded
+		// (checking here rather than at the beginning will tend to reduce the
+		// space to slightly less than the soft limit, which will allow the backup
+		// client to start uploading files again)
+		if((0 - mBlocksUsedDelta) >= mDeletionSizeTarget)
+		{
+			break;
+		}
+	}
+
+	return false;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    HousekeepStoreAccount::DeleteFile(int64_t, int64_t, BackupStoreDirectory &, const std::string &, int64_t)
+//		Purpose: Delete a file. Takes the directory already loaded in and the filename,
+//				 for efficiency in both the usage scenarios. Updates the block
+//				 usage deltas and deletion counters as a side effect.
+//		Created: 15/7/04
+//
+// --------------------------------------------------------------------------
+void HousekeepStoreAccount::DeleteFile(int64_t InDirectory, int64_t ObjectID, BackupStoreDirectory &rDirectory, const std::string &rDirectoryFilename, int64_t OriginalDirSizeInBlocks)
+{
+	// Find the entry inside the directory
+	bool wasDeleted = false;
+	bool wasOldVersion = false;
+	int64_t deletedFileSizeInBlocks = 0;
+	// A pointer to an object which requires committing if the directory save goes OK
+	std::auto_ptr<RaidFileWrite> padjustedEntry;
+	// BLOCK
+	{
+		BackupStoreDirectory::Entry *pentry = rDirectory.FindEntryByID(ObjectID);
+		if(pentry == 0)
+		{
+			::syslog(LOG_ERR, "acc 0x%08x, object %lld not found in dir %lld, logic error/corruption? Run bbstoreaccounts check <accid> fix", mAccountID, ObjectID, InDirectory);
+			return;
+		}
+
+		// Record the flags it's got set
+		wasDeleted = ((pentry->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) != 0);
+		wasOldVersion = ((pentry->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) != 0);
+		// Check this should be deleted
+		if(!wasDeleted && !wasOldVersion)
+		{
+			// Things have changed since we were last around -- don't delete a current file
+			return;
+		}
+
+		// Record size
+		deletedFileSizeInBlocks = pentry->GetSizeInBlocks();
+
+		// If the entry is involved in a chain of patches, it needs to be handled
+		// a bit more carefully.
+		if(pentry->GetDependsNewer() != 0 && pentry->GetDependsOlder() == 0)
+		{
+			// This entry is a patch from a newer entry. Just need to update the info on that entry.
+			BackupStoreDirectory::Entry *pnewer = rDirectory.FindEntryByID(pentry->GetDependsNewer());
+			if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID)
+			{
+				THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
+			}
+			// Change the info in the newer entry so that this no longer points to this entry
+			pnewer->SetDependsOlder(0);
+		}
+		else if(pentry->GetDependsOlder() != 0)
+		{
+			BackupStoreDirectory::Entry *polder = rDirectory.FindEntryByID(pentry->GetDependsOlder());
+			if(pentry->GetDependsNewer() == 0)
+			{
+				// There exists an older version which depends on this one. Need to combine the two over that one.
+
+				// Adjust the other entry in the directory
+				if(polder == 0 || polder->GetDependsNewer() != ObjectID)
+				{
+					THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
+				}
+				// Change the info in the older entry so that this no longer points to this entry
+				polder->SetDependsNewer(0);
+			}
+			else
+			{
+				// This entry is in the middle of a chain, and two patches need combining.
+
+				// First, adjust the directory entries
+				BackupStoreDirectory::Entry *pnewer = rDirectory.FindEntryByID(pentry->GetDependsNewer());
+				if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID
+					|| polder == 0 || polder->GetDependsNewer() != ObjectID)
+				{
+					THROW_EXCEPTION(BackupStoreException, PatchChainInfoBadInDirectory);
+				}
+				// Remove the middle entry from the linked list by simply using the values from this entry
+				pnewer->SetDependsOlder(pentry->GetDependsOlder());
+				polder->SetDependsNewer(pentry->GetDependsNewer());
+			}
+
+			// COMMON CODE to both cases
+
+			// Generate the filename of the older version
+			std::string objFilenameOlder;
+			MakeObjectFilename(pentry->GetDependsOlder(), objFilenameOlder);
+			// Open it twice (it's the diff)
+			std::auto_ptr<RaidFileRead> pdiff(RaidFileRead::Open(mStoreDiscSet, objFilenameOlder));
+			std::auto_ptr<RaidFileRead> pdiff2(RaidFileRead::Open(mStoreDiscSet, objFilenameOlder));
+			// Open this file
+			std::string objFilename;
+			MakeObjectFilename(ObjectID, objFilename);
+			std::auto_ptr<RaidFileRead> pobjectBeingDeleted(RaidFileRead::Open(mStoreDiscSet, objFilename));
+			// And open a write file to overwrite the other directory entry
+			padjustedEntry.reset(new RaidFileWrite(mStoreDiscSet, objFilenameOlder));
+			padjustedEntry->Open(true /* allow overwriting */);
+
+			if(pentry->GetDependsNewer() == 0)
+			{
+				// There exists an older version which depends on this one. Need to combine the two over that one.
+				BackupStoreFile::CombineFile(*pdiff, *pdiff2, *pobjectBeingDeleted, *padjustedEntry);
+			}
+			else
+			{
+				// This entry is in the middle of a chain, and two patches need combining.
+				BackupStoreFile::CombineDiffs(*pobjectBeingDeleted, *pdiff, *pdiff2, *padjustedEntry);
+			}
+			// The file will be committed later when the directory is safely committed.
+
+			// Work out the adjusted size, and propagate the difference into
+			// the relevant block usage deltas
+			int64_t newSize = padjustedEntry->GetDiscUsageInBlocks();
+			int64_t sizeDelta = newSize - polder->GetSizeInBlocks();
+			mBlocksUsedDelta += sizeDelta;
+			if((polder->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) != 0)
+			{
+				mBlocksInDeletedFilesDelta += sizeDelta;
+			}
+			if((polder->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) != 0)
+			{
+				mBlocksInOldFilesDelta += sizeDelta;
+			}
+			polder->SetSizeInBlocks(newSize);
+		}
+
+		// pentry no longer valid
+	}
+
+	// Delete it from the directory
+	rDirectory.DeleteEntry(ObjectID);
+
+	// Save directory back to disc
+	// BLOCK
+	int64_t dirRevisedSize = 0;
+	{
+		RaidFileWrite writeDir(mStoreDiscSet, rDirectoryFilename);
+		writeDir.Open(true /* allow overwriting */);
+		rDirectory.WriteToStream(writeDir);
+
+		// get the disc usage (must do this before committing it)
+		dirRevisedSize = writeDir.GetDiscUsageInBlocks();
+
+		// Commit directory
+		writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+		// adjust usage counts for this directory
+		if(dirRevisedSize > 0)
+		{
+			int64_t adjust = dirRevisedSize - OriginalDirSizeInBlocks;
+			mBlocksUsedDelta += adjust;
+			mBlocksInDirectoriesDelta += adjust;
+		}
+	}
+
+	// Commit any new adjusted entry
+	// (done after the directory commit, so the directory state is saved first)
+	if(padjustedEntry.get() != 0)
+	{
+		padjustedEntry->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+		padjustedEntry.reset(); // delete it now
+	}
+
+	// Delete from disc
+	{
+		std::string objFilename;
+		MakeObjectFilename(ObjectID, objFilename);
+		RaidFileWrite del(mStoreDiscSet, objFilename);
+		del.Delete();
+	}
+
+	// Adjust counts for the file
+	++mFilesDeleted;
+	mBlocksUsedDelta -= deletedFileSizeInBlocks;
+	if(wasDeleted) mBlocksInDeletedFilesDelta -= deletedFileSizeInBlocks;
+	if(wasOldVersion) mBlocksInOldFilesDelta -= deletedFileSizeInBlocks;
+
+	// Delete the directory?
+	// Do this if... dir has zero entries, and is marked as deleted in its containing directory
+	// (the deleted-flag check is performed later by DeleteEmptyDirectories)
+	if(rDirectory.GetNumberOfEntries() == 0)
+	{
+		// Candidate for deletion
+		mEmptyDirectories.push_back(InDirectory);
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    HousekeepStoreAccount::DeleteEmptyDirectories()
+//		Purpose: Remove any empty directories which are also marked as deleted in their containing directory,
+//				 returning true if the operation was interrupted
+//		Created: 15/12/03
+//
+// --------------------------------------------------------------------------
+bool HousekeepStoreAccount::DeleteEmptyDirectories()
+{
+	// Deleting a directory may leave its parent empty too, so iterate until
+	// no new candidates are produced
+	while(mEmptyDirectories.size() > 0)
+	{
+		std::vector<int64_t> toExamine;
+
+		// Go through list
+		for(std::vector<int64_t>::const_iterator i(mEmptyDirectories.begin()); i != mEmptyDirectories.end(); ++i)
+		{
+			// Periodically poll for an abort request
+			if((--mCountUntilNextInterprocessMsgCheck) <= 0)
+			{
+				mCountUntilNextInterprocessMsgCheck = POLL_INTERPROCESS_MSG_CHECK_FREQUENCY;
+				// Check for having to stop
+				if(mrDaemon.CheckForInterProcessMsg(mAccountID)) // include account ID here as the specified account is now locked
+				{
+					// Need to abort now
+					return true;
+				}
+			}
+
+			// Do not delete the root directory
+			if(*i == BACKUPSTORE_ROOT_DIRECTORY_ID)
+			{
+				continue;
+			}
+
+			// Load up the directory to potentially delete
+			std::string dirFilename;
+			BackupStoreDirectory dir;
+			int64_t dirSizeInBlocks = 0;
+			{
+				MakeObjectFilename(*i, dirFilename);
+				// Check it actually exists (just in case it gets added twice to the list)
+				if(!RaidFileRead::FileExists(mStoreDiscSet, dirFilename))
+				{
+					// doesn't exist, next!
+					continue;
+				}
+				// load
+				std::auto_ptr<RaidFileRead> dirStream(RaidFileRead::Open(mStoreDiscSet, dirFilename));
+				dirSizeInBlocks = dirStream->GetDiscUsageInBlocks();
+				dir.ReadFromStream(*dirStream, IOStream::TimeOutInfinite);
+			}
+
+			// Make sure this directory is actually empty
+			if(dir.GetNumberOfEntries() != 0)
+			{
+				// Not actually empty, try next one
+				continue;
+			}
+
+			// Candidate for deletion... open containing directory
+			std::string containingDirFilename;
+			BackupStoreDirectory containingDir;
+			int64_t containingDirSizeInBlocksOrig = 0;
+			{
+				MakeObjectFilename(dir.GetContainerID(), containingDirFilename);
+				std::auto_ptr<RaidFileRead> containingDirStream(RaidFileRead::Open(mStoreDiscSet, containingDirFilename));
+				containingDirSizeInBlocksOrig = containingDirStream->GetDiscUsageInBlocks();
+				containingDir.ReadFromStream(*containingDirStream, IOStream::TimeOutInfinite);
+			}
+
+			// Find the entry -- only delete if the parent marks it as deleted
+			BackupStoreDirectory::Entry *pdirentry = containingDir.FindEntryByID(dir.GetObjectID());
+			if((pdirentry != 0) && ((pdirentry->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) != 0))
+			{
+				// Should be deleted
+				containingDir.DeleteEntry(dir.GetObjectID());
+
+				// Is the containing dir now a candidate for deletion?
+				if(containingDir.GetNumberOfEntries() == 0)
+				{
+					toExamine.push_back(containingDir.GetObjectID());
+				}
+
+				// Write revised parent directory
+				RaidFileWrite writeDir(mStoreDiscSet, containingDirFilename);
+				writeDir.Open(true /* allow overwriting */);
+				containingDir.WriteToStream(writeDir);
+
+				// get the disc usage (must do this before committing it)
+				int64_t dirSize = writeDir.GetDiscUsageInBlocks();
+
+				// Commit directory
+				writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+
+				// adjust usage counts for this directory
+				if(dirSize > 0)
+				{
+					int64_t adjust = dirSize - containingDirSizeInBlocksOrig;
+					mBlocksUsedDelta += adjust;
+					mBlocksInDirectoriesDelta += adjust;
+				}
+
+				// Delete the directory itself
+				{
+					RaidFileWrite del(mStoreDiscSet, dirFilename);
+					del.Delete();
+				}
+
+				// And adjust usage counts for the directory that's just been deleted
+				mBlocksUsedDelta -= dirSizeInBlocks;
+				mBlocksInDirectoriesDelta -= dirSizeInBlocks;
+
+				// Update count
+				++mEmptyDirectoriesDeleted;
+			}
+		}
+
+		// Remove contents of empty directories
+		mEmptyDirectories.clear();
+		// Swap in new, so it's examined next time round
+		mEmptyDirectories.swap(toExamine);
+	}
+
+	// Not interrupted
+	return false;
+}
+
+
+
+
diff --git a/bin/bbstored/HousekeepStoreAccount.h b/bin/bbstored/HousekeepStoreAccount.h
new file mode 100755
index 00000000..6c8f251d
--- /dev/null
+++ b/bin/bbstored/HousekeepStoreAccount.h
@@ -0,0 +1,97 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: HousekeepStoreAccount.h
+// Purpose: Action class to perform housekeeping on a store account
+// Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+
+#ifndef HOUSEKEEPSTOREACCOUNT__H
+#define HOUSEKEEPSTOREACCOUNT__H
+
+#include <string>
+#include <set>
+#include <vector>
+
+class BackupStoreDaemon;
+class BackupStoreDirectory;
+
+
+// --------------------------------------------------------------------------
+//
+// Class
+//		Name:    HousekeepStoreAccount
+//		Purpose: Action class to perform housekeeping on a store account --
+//				 scans the store, deletes old/deleted file versions to meet
+//				 a size target, and removes empty deleted directories
+//		Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+class HousekeepStoreAccount
+{
+public:
+	// rStoreRoot is the root path of the account's store within disc set StoreDiscSet
+	HousekeepStoreAccount(int AccountID, const std::string &rStoreRoot, int StoreDiscSet, BackupStoreDaemon &rDaemon);
+	~HousekeepStoreAccount();
+
+	// Perform the housekeeping run for this account
+	void DoHousekeeping();
+
+
+private:
+	// utility functions
+	void MakeObjectFilename(int64_t ObjectID, std::string &rFilenameOut);
+
+	bool ScanDirectory(int64_t ObjectID);		// returns false if operation should halt
+	bool DeleteFiles();							// returns true if interrupted
+	bool DeleteEmptyDirectories();				// returns true if interrupted
+	void DeleteFile(int64_t InDirectory, int64_t ObjectID, BackupStoreDirectory &rDirectory, const std::string &rDirectoryFilename, int64_t OriginalDirSizeInBlocks);
+
+private:
+	// Entry describing a single file version which may be deleted
+	typedef struct
+	{
+		int64_t mObjectID;
+		int64_t mInDirectory;			// object ID of the containing directory
+		int64_t mSizeInBlocks;
+		int32_t mMarkNumber;
+		int32_t mVersionAgeWithinMark;	// 0 == current, 1 latest old version, etc
+	} DelEn;
+
+	// Ordering for the potential-deletions set: least valuable entries first
+	struct DelEnCompare
+	{
+		bool operator()(const DelEn &x, const DelEn &y);
+	};
+
+	int mAccountID;
+	std::string mStoreRoot;
+	int mStoreDiscSet;
+	BackupStoreDaemon &mrDaemon;
+
+	// Number of blocks to free; deletion only happens when this is > 0
+	int64_t mDeletionSizeTarget;
+
+	std::set<DelEn, DelEnCompare> mPotentialDeletions;
+	int64_t mPotentialDeletionsTotalSize;
+	int64_t mMaxSizeInPotentialDeletions;
+
+	// List of directories which are empty, and might be good for deleting
+	std::vector<int64_t> mEmptyDirectories;
+
+	// The re-calculated blocks used stats
+	int64_t mBlocksUsed;
+	int64_t mBlocksInOldFiles;
+	int64_t mBlocksInDeletedFiles;
+	int64_t mBlocksInDirectories;
+
+	// Deltas from deletion (negative when space has been freed)
+	int64_t mBlocksUsedDelta;
+	int64_t mBlocksInOldFilesDelta;
+	int64_t mBlocksInDeletedFilesDelta;
+	int64_t mBlocksInDirectoriesDelta;
+
+	// Deletion count
+	int64_t mFilesDeleted;
+	int64_t mEmptyDirectoriesDeleted;
+
+	// Poll frequency -- countdown until the next inter-process message check
+	int mCountUntilNextInterprocessMsgCheck;
+};
+
+#endif // HOUSEKEEPSTOREACCOUNT__H
+
diff --git a/bin/bbstored/Makefile.extra b/bin/bbstored/Makefile.extra
new file mode 100755
index 00000000..187d53ef
--- /dev/null
+++ b/bin/bbstored/Makefile.extra
@@ -0,0 +1,9 @@
+
+MAKEPROTOCOL = ../../lib/server/makeprotocol.pl
+
+GEN_CMD_SRV = $(MAKEPROTOCOL) Server backupprotocol.txt
+
+# AUTOGEN SEEDING
+# Regenerate the server protocol sources whenever the generator script or
+# the protocol definition file changes.
+autogen_BackupProtocolServer.cpp autogen_BackupProtocolServer.h: $(MAKEPROTOCOL) backupprotocol.txt
+	perl $(GEN_CMD_SRV)
+
diff --git a/bin/bbstored/backupprotocol.txt b/bin/bbstored/backupprotocol.txt
new file mode 100755
index 00000000..39cb1fb3
--- /dev/null
+++ b/bin/bbstored/backupprotocol.txt
@@ -0,0 +1,221 @@
+#
+# backup protocol definition
+#
+
+Name Backup
+IdentString Box-Backup:v=B
+ServerContextClass BackupContext BackupContext.h
+
+ClientType Filename BackupStoreFilenameClear BackupStoreFilenameClear.h
+ServerType Filename BackupStoreFilename BackupStoreFilename.h
+
+ImplementLog Server syslog
+ImplementLog Client syslog
+ImplementLog Client file
+
+LogTypeToText Client Filename \"%s\" VAR.GetClearFilename().c_str()
+
+BEGIN_OBJECTS
+
+# -------------------------------------------------------------------------------------
+# Session commands
+# -------------------------------------------------------------------------------------
+
+Error 0 IsError(Type,SubType) Reply
+ int32 Type
+ int32 SubType
+ CONSTANT ErrorType 1000
+ CONSTANT Err_WrongVersion 1
+ CONSTANT Err_NotInRightProtocolPhase 2
+ CONSTANT Err_BadLogin 3
+ CONSTANT Err_CannotLockStoreForWriting 4
+ CONSTANT Err_SessionReadOnly 5
+ CONSTANT Err_FileDoesNotVerify 6
+ CONSTANT Err_DoesNotExist 7
+ CONSTANT Err_DirectoryAlreadyExists 8
+ CONSTANT Err_CannotDeleteRoot 9
+ CONSTANT Err_TargetNameExists 10
+ CONSTANT Err_StorageLimitExceeded 11
+ CONSTANT Err_DiffFromFileDoesNotExist 12
+ CONSTANT Err_DoesNotExistInDirectory 13
+ CONSTANT Err_PatchConsistencyError 14
+
+Version 1 Command(Version) Reply
+ int32 Version
+
+
+Login 2 Command(LoginConfirmed)
+ int32 ClientID
+ int32 Flags
+ CONSTANT Flags_ReadOnly 1
+
+
+LoginConfirmed 3 Reply
+ int64 ClientStoreMarker
+ int64 BlocksUsed
+ int64 BlocksSoftLimit
+ int64 BlocksHardLimit
+
+
+Finished 4 Command(Finished) Reply EndsConversation
+
+
+# generic success object
+Success 5 Reply
+ int64 ObjectID
+
+
+SetClientStoreMarker 6 Command(Success)
+ int64 ClientStoreMarker
+
+
+# -------------------------------------------------------------------------------------
+# Generic object commands
+# -------------------------------------------------------------------------------------
+
+GetObject 10 Command(Success)
+ int64 ObjectID
+ CONSTANT NoObject 0
+ # reply has stream following, if ObjectID != NoObject
+
+
+MoveObject 11 Command(Success)
+ int64 ObjectID
+ int64 MoveFromDirectory
+ int64 MoveToDirectory
+ int32 Flags
+ Filename NewFilename
+
+ CONSTANT Flags_MoveAllWithSameName 1
+ CONSTANT Flags_AllowMoveOverDeletedObject 2
+
+# consider this an object command as, although it deals with directory entries,
+# it's not specific to either a file or a directory
+
+
+GetObjectName 12 Command(ObjectName)
+ int64 ObjectID
+ int64 ContainingDirectoryID
+ CONSTANT ObjectID_DirectoryOnly 0
+
+ # set ObjectID to ObjectID_DirectoryOnly to only get info on the directory
+
+
+ObjectName 13 Reply
+ int32 NumNameElements
+ int64 ModificationTime
+ int64 AttributesHash
+ int16 Flags
+ # NumNameElements is zero if the object doesn't exist
+ CONSTANT NumNameElements_ObjectDoesntExist 0
+ # a stream of Filename objects follows, if and only if NumNameElements > 0
+
+
+# -------------------------------------------------------------------------------------
+# Directory commands
+# -------------------------------------------------------------------------------------
+
+CreateDirectory 20 Command(Success) StreamWithCommand
+ int64 ContainingDirectoryID
+ int64 AttributesModTime
+ Filename DirectoryName
+ # stream following containing attributes
+
+
+ListDirectory 21 Command(Success)
+ int64 ObjectID
+ int16 FlagsMustBeSet
+ int16 FlagsNotToBeSet
+ bool SendAttributes
+ # make sure these flags are synced with those in BackupStoreDirectory
+ CONSTANT Flags_INCLUDE_EVERYTHING -1
+ CONSTANT Flags_EXCLUDE_NOTHING 0
+ CONSTANT Flags_EXCLUDE_EVERYTHING 15
+ CONSTANT Flags_File 1
+ CONSTANT Flags_Dir 2
+ CONSTANT Flags_Deleted 4
+ CONSTANT Flags_OldVersion 8
+ # make sure this is the same as in BackupStoreConstants.h
+ CONSTANT RootDirectory 1
+
+ # reply has stream following Success object, containing a stored BackupStoreDirectory
+
+
+ChangeDirAttributes 22 Command(Success) StreamWithCommand
+ int64 ObjectID
+ int64 AttributesModTime
+ # stream following containing attributes
+
+
+DeleteDirectory 23 Command(Success)
+ int64 ObjectID
+
+UndeleteDirectory 24 Command(Success)
+ int64 ObjectID
+ # may not have exactly the desired effect if files within in have been deleted before the directory was deleted.
+
+
+# -------------------------------------------------------------------------------------
+# File commands
+# -------------------------------------------------------------------------------------
+
+StoreFile 30 Command(Success) StreamWithCommand
+ int64 DirectoryObjectID
+ int64 ModificationTime
+ int64 AttributesHash
+ int64 DiffFromFileID # 0 if the file is not a diff
+ Filename Filename
+ # then send a stream containing the encoded file
+
+
+GetFile 31 Command(Success)
+ int64 InDirectory
+ int64 ObjectID
+ # error returned if not a file, or does not exist
+ # reply has stream following, containing an encoded file IN STREAM ORDER
+ # (use GetObject to get it in file order)
+
+
+SetReplacementFileAttributes 32 Command(Success) StreamWithCommand
+ int64 InDirectory
+ int64 AttributesHash
+ Filename Filename
+ # stream follows containing attributes
+
+
+DeleteFile 33 Command(Success)
+ int64 InDirectory
+ Filename Filename
+ # will return 0 if the object couldn't be found in the specified directory
+
+
+GetBlockIndexByID 34 Command(Success)
+ int64 ObjectID
+
+ # stream of the block index follows the reply
+ # returns an error if the object didn't exist
+
+
+GetBlockIndexByName 35 Command(Success)
+ int64 InDirectory
+ Filename Filename
+
+ # Success object contains the found ID -- or 0 if the entry wasn't found in the directory
+ # stream of the block index follows the reply if found ID != 0
+
+
+# -------------------------------------------------------------------------------------
+# Information commands
+# -------------------------------------------------------------------------------------
+
+GetAccountUsage 40 Command(AccountUsage)
+ # no data members
+
+AccountUsage 41 Reply
+ int64 BlocksUsed
+ int64 BlocksInOldFiles
+ int64 BlocksInDeletedFiles
+ int64 BlocksInDirectories
+ int64 BlocksSoftLimit
+ int64 BlocksHardLimit
+ int32 BlockSize
diff --git a/bin/bbstored/bbstored-certs b/bin/bbstored/bbstored-certs
new file mode 100755
index 00000000..d1fa8dea
--- /dev/null
+++ b/bin/bbstored/bbstored-certs
@@ -0,0 +1,319 @@
+#!/usr/bin/perl
+use strict;
+
+# validity period for root certificates -- default is a very long time
+my $root_sign_period = '10000';
+
+# but less so for client certificates
+my $sign_period = '5000';
+
+# check and get command line parameters
+if($#ARGV < 1)
+{
+ print <<__E;
+
+bbstored certificates utility.
+
+Bad command line parameters.
+Usage:
+ bbstored-certs certs-dir command [arguments]
+
+certs-dir is the directory holding the root keys and certificates for the backup system
+command is the action to perform, taking parameters.
+
+Commands are
+
+ init
+ -- generate initial root certificates (certs-dir must not already exist)
+ sign certificate-name
+ -- sign a client certificate
+ sign-server certificate-name
+ -- sign a server certificate
+
+Signing requires confirmation that the certificate is correct and should be signed.
+
+__E
+ exit(1);
+}
+
+# check for OPENSSL_CONF environment var being set
+if(exists $ENV{'OPENSSL_CONF'})
+{
+ print <<__E;
+
+---------------------------------------
+
+WARNING:
+ You have the OPENSSL_CONF environment variable set.
+ Use of non-standard openssl configs may cause problems.
+
+---------------------------------------
+
+__E
+}
+
+# directory structure:
+#
+# roots/
+# clientCA.pem -- root certificate for client (used on server)
+# serverCA.pem -- root certificate for servers (used on clients)
+# keys/
+# clientRootKey.pem -- root key for clients
+# serverRootKey.pem -- root key for servers
+# servers/
+# hostname.pem -- certificate for server 'hostname'
+# clients/
+#    account.pem -- certificate for account 'account' (ID in hex)
+#
+
+
+# check parameters
+my ($cert_dir,$command,@args) = @ARGV;
+
+# check directory exists
+# (all commands except 'init' operate on an existing certs-dir)
+if($command ne 'init')
+{
+	if(!-d $cert_dir)
+	{
+		die "$cert_dir does not exist";
+	}
+}
+
+# run command
+if($command eq 'init') {&cmd_init;}
+elsif($command eq 'sign') {&cmd_sign;}
+elsif($command eq 'sign-server') {&cmd_sign_server;}
+else
+{
+	die "Unknown command $command"
+}
+
+# Create the certs-dir directory structure, then generate the client and
+# server root keys and self-signed root certificates.
+sub cmd_init
+{
+	# create directories (all private to the owner)
+	unless(mkdir($cert_dir,0700)
+		&& mkdir($cert_dir.'/roots',0700)
+		&& mkdir($cert_dir.'/keys',0700)
+		&& mkdir($cert_dir.'/servers',0700)
+		&& mkdir($cert_dir.'/clients',0700))
+	{
+		die "Failed to create directory structure"
+	}
+
+	# create root keys and certs
+	cmd_init_create_root('client');
+	cmd_init_create_root('server');
+}
+
+# Generate a root key and self-signed CA certificate for one entity
+# ('client' or 'server'): 2048-bit RSA key, CSR with a fixed subject,
+# self-signed with v3_ca extensions, plus an initial serial number file.
+sub cmd_init_create_root
+{
+	my $entity = $_[0];
+
+	# paths for the certificate, serial file, private key and CSR
+	my $cert = "$cert_dir/roots/".$entity.'CA.pem';
+	my $serial = "$cert_dir/roots/".$entity.'CA.srl';
+	my $key = "$cert_dir/keys/".$entity.'RootKey.pem';
+	my $csr = "$cert_dir/keys/".$entity.'RootCSR.pem';
+
+	# generate key
+	if(system("openssl genrsa -out $key 2048") != 0)
+	{
+		die "Couldn't generate private key."
+	}
+
+	# make CSR
+	# (the heredoc answers openssl's interactive prompts; '.' leaves a
+	# field blank, the only filled field is the common name)
+	die "Couldn't run openssl for CSR generation" unless
+		open(CSR,"|openssl req -new -key $key -sha1 -out $csr");
+	print CSR <<__E;
+.
+.
+.
+.
+.
+Backup system $entity root
+.
+.
+.
+
+__E
+	close CSR;
+	print "\n\n";
+	die "Certificate request wasn't created.\n" unless -f $csr;
+
+	# sign it to make a self-signed root CA key
+	if(system("openssl x509 -req -in $csr -sha1 -extensions v3_ca -signkey $key -out $cert -days $root_sign_period") != 0)
+	{
+		die "Couldn't generate root certificate."
+	}
+
+	# write the initial serial number
+	open SERIAL,">$serial" or die "Can't open $serial for writing";
+	print SERIAL "00\n";
+	close SERIAL;
+}
+
+# Sign a client certificate request. Verifies the CSR's common name is of
+# the form BACKUP-<account id in hex>, that the filename matches the account
+# ID, and asks for interactive confirmation before signing.
+sub cmd_sign
+{
+	my $csr = $args[0];
+
+	if(!-f $csr)
+	{
+		die "$csr does not exist";
+	}
+
+	# get the common name specified in this certificate
+	my $common_name = get_csr_common_name($csr);
+
+	# look OK?
+	unless($common_name =~ m/\ABACKUP-([A-Fa-f0-9]+)\Z/)
+	{
+		die "The certificate presented does not appear to be a backup client certificate"
+	}
+
+	# account ID (hex), captured from the common name above
+	my $acc = $1;
+
+	# check against filename
+	if(!($csr =~ m/(\A|\/)([A-Fa-f0-9]+)-/) || $2 ne $acc)
+	{
+		die "Certificate request filename does not match name in certificate ($common_name)"
+	}
+
+	print <<__E;
+
+This certificate is for backup account
+
+	$acc
+
+Ensure this matches the account number you are expecting. The filename is
+
+	$csr
+
+which should include this account number, and additionally, you should check
+that you received it from the right person.
+
+Signing the wrong certificate compromises the security of your backup system.
+
+Would you like to sign this certificate? (type 'yes' to confirm)
+__E
+
+	return unless get_confirmation();
+
+	# out certificate
+	my $out_cert = "$cert_dir/clients/$acc"."-cert.pem";
+
+	# sign it!
+	if(system("openssl x509 -req -in $csr -sha1 -extensions usr_crt -CA $cert_dir/roots/clientCA.pem -CAkey $cert_dir/keys/clientRootKey.pem -out $out_cert -days $sign_period") != 0)
+	{
+		die "Signing failed"
+	}
+
+	# tell user what to do next
+	print <<__E;
+
+
+Certificate signed.
+
+Send the files
+
+	$out_cert
+	$cert_dir/roots/serverCA.pem
+
+to the client.
+
+__E
+}
+
+# Sign a server certificate request. Verifies the CSR's common name looks
+# like a hostname, and asks for interactive confirmation before signing.
+sub cmd_sign_server
+{
+	my $csr = $args[0];
+
+	if(!-f $csr)
+	{
+		die "$csr does not exist";
+	}
+
+	# get the common name specified in this certificate
+	my $common_name = get_csr_common_name($csr);
+
+	# look OK? (hostname characters only)
+	if($common_name !~ m/\A[-a-zA-Z0-9.]+\Z/)
+	{
+		die "Invalid server name"
+	}
+
+	print <<__E;
+
+This certificate is for backup server
+
+	$common_name
+
+Signing the wrong certificate compromises the security of your backup system.
+
+Would you like to sign this certificate? (type 'yes' to confirm)
+__E
+
+	return unless get_confirmation();
+
+	# out certificate
+	my $out_cert = "$cert_dir/servers/$common_name"."-cert.pem";
+
+	# sign it!
+	if(system("openssl x509 -req -in $csr -sha1 -extensions usr_crt -CA $cert_dir/roots/serverCA.pem -CAkey $cert_dir/keys/serverRootKey.pem -out $out_cert -days $sign_period") != 0)
+	{
+		die "Signing failed"
+	}
+
+	# tell user what to do next
+	print <<__E;
+
+
+Certificate signed.
+
+Install the files
+
+	$out_cert
+	$cert_dir/roots/clientCA.pem
+
+on the server.
+
+__E
+}
+
+
+# Extract and return the CN field of the subject from a CSR, by parsing
+# the text output of 'openssl req -text'. Dies if no subject is found.
+sub get_csr_common_name
+{
+	my $csr = $_[0];
+
+	open CSRTEXT,"openssl req -text -in $csr |" or die "Can't open openssl for reading";
+
+	my $subject;
+	while(<CSRTEXT>)
+	{
+		$subject = $1 if m/Subject:.+?CN=([-\.\w]+)/
+	}
+	close CSRTEXT;
+
+	# NOTE(review): if no CN matched, $subject is undef here; the eq
+	# comparison still takes the die branch, which appears intended
+	if($subject eq '')
+	{
+		die "No subject found in CSR $csr"
+	}
+
+	return $subject
+}
+
+# Read one line from stdin; return 1 only if the user typed exactly 'yes'
+# (case-insensitive), otherwise print CANCELLED and return 0.
+sub get_confirmation()
+{
+	my $line = <STDIN>;
+	chomp $line;
+	if(lc $line ne 'yes')
+	{
+		print "CANCELLED\n";
+		return 0;
+	}
+
+	return 1;
+}
+
+
+
+
+
diff --git a/bin/bbstored/bbstored-config b/bin/bbstored/bbstored-config
new file mode 100755
index 00000000..0032f4c6
--- /dev/null
+++ b/bin/bbstored/bbstored-config
@@ -0,0 +1,242 @@
+#!/usr/bin/perl
+use strict;
+
+# should be running as root
+# ($> is the effective UID -- warn only, don't abort)
+if($> != 0)
+{
+	printf "\nWARNING: this should be run as root\n\n"
+}
+
+# check and get command line parameters
+# (three are mandatory: config-dir, server-hostname, username;
+#  the raidfile config path is optional)
+if($#ARGV < 2)
+{
+	print <<__E;
+
+Setup bbstored config utility.
+
+Bad command line parameters.
+Usage:
+	bbstored-config config-dir server-hostname username [raidfile-config]
+
+config-dir usually /etc/box
+server-hostname is the hostname used by clients to connect to this server
+username is the user to run the server under
+raidfile-config is optional. Use if you have a non-standard raidfile.conf file.
+
+__E
+	exit(1);
+}
+
+# check for OPENSSL_CONF environment var being set
+# (a non-standard openssl config could change CSR/certificate contents)
+if(exists $ENV{'OPENSSL_CONF'})
+{
+	print <<__E;
+
+---------------------------------------
+
+WARNING:
+	You have the OPENSSL_CONF environment variable set.
+	Use of non-standard openssl configs may cause problems.
+
+---------------------------------------
+
+__E
+}
+
+# default locations
+my $default_config_location = '/etc/box/bbstored.conf';
+
+# command line parameters
+my ($config_dir,$server,$username,$raidfile_config) = @ARGV;
+
+# default the raidfile config path when the fourth argument is omitted
+# NOTE(review): $raidfile_config is undef in that case; 'ne' on undef is
+# silent here (no warnings pragma), but would warn under -w
+$raidfile_config = $config_dir . '/raidfile.conf' unless $raidfile_config ne '';
+
+# check server exists, but don't bother checking that it's actually this machine.
+# (gethostbyname returns an empty list when the lookup fails)
+{
+	my @r = gethostbyname($server);
+	if($#r < 0)
+	{
+		die "Server '$server' not found. (check server name, test DNS lookup failed.)"
+	}
+}
+
+# check this exists
+if(!-f $raidfile_config)
+{
+	print "The RaidFile configuration file $raidfile_config doesn't exist.\nYou may need to create it with raidfile-config.\nWon't configure bbstored without it.\n";
+	exit(1);
+}
+
+# check that the user exists
+die "You shouldn't run bbstored as root" if $username eq 'root';
+my $user_uid = 0;
+# getpwnam returns (name, passwd, uid, ...); if the user doesn't exist
+# the list assignment leaves $user_uid undefined, which compares == 0
+(undef,undef,$user_uid) = getpwnam($username);
+if($user_uid == 0)
+{
+	die "User $username doesn't exist\n";
+}
+
+# check that directories are writeable
+# (read every DirN = ... line from raidfile.conf and make sure the
+# configured user can write to each one, or to a 'backup' subdirectory)
+open RAIDCONF,$raidfile_config or die "Can't open $raidfile_config";
+{
+	while(<RAIDCONF>)
+	{
+		next unless m/Dir\d\s*=\s*(.+)/;
+		my $d = $1;
+		# prefer an existing 'backup' subdirectory, if the admin made one
+		$d = $d.'/backup' if -e $d.'/backup';
+		print "Checking permissions on $d\n";
+		my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,$blksize,$blocks) = stat($d);
+		# owner gets checked against the rwx owner bits, anyone else
+		# against the rwx 'other' bits.
+		# NOTE(review): group permissions are not considered, so a user
+		# granted access only via group membership is rejected -- confirm
+		# whether that's intentional before changing it.
+		my $req_perms = ($uid == $user_uid)?0700:0007;
+		if(($mode & $req_perms) != $req_perms)
+		{
+			print "$username doesn't appear to have the necessary permissions on $d\n";
+			print "Either adjust permissions, or create a directory 'backup' inside the\n";
+			print "directory specified in raidfile.conf which is writable.\n";
+			exit(1);
+		}
+	}
+}
+close RAIDCONF;
+
+# ssl stuff -- locations of the server's key, CSR and certificate, and
+# of the client CA root used to verify connecting clients
+my $private_key = "$config_dir/bbstored/$server-key.pem";
+my $certificate_request = "$config_dir/bbstored/$server-csr.pem";
+my $certificate = "$config_dir/bbstored/$server-cert.pem";
+my $ca_root_cert = "$config_dir/bbstored/clientCA.pem";
+
+# other files
+my $config_file = "$config_dir/bbstored.conf";
+my $accounts_file = "$config_dir/bbstored/accounts.txt";
+
+# summarise configuration
+
+print <<__E;
+
+Setup bbstored config utility.
+
+Configuration:
+	Writing configuration file: $config_file
+	Writing empty accounts file: $accounts_file
+	Server hostname: $server
+	RaidFile config: $raidfile_config
+
+__E
+
+# create directories
+if(!-d $config_dir)
+{
+	print "Creating $config_dir...\n";
+	mkdir $config_dir,0755 or die "Can't create $config_dir";
+}
+
+if(!-d "$config_dir/bbstored")
+{
+	print "Creating $config_dir/bbstored\n";
+	mkdir "$config_dir/bbstored",0755 or die "Can't create $config_dir/bbstored";
+}
+
+# create blank accounts file
+if(!-f $accounts_file)
+{
+	print "Creating blank accounts file\n";
+	# check the open -- a silent failure here would leave the admin
+	# believing the accounts file exists when it doesn't
+	open ACC,">$accounts_file" or die "Can't create $accounts_file";
+	close ACC;
+}
+
+# generate the private key for the server
+# (skipped when the key file already exists, so re-running is safe)
+if(!-f $private_key)
+{
+	print "Generating private key...\n";
+	if(system("openssl genrsa -out $private_key 2048") != 0)
+	{
+		die "Couldn't generate private key."
+	}
+}
+
+# generate a certificate request
+# (skipped when the CSR file already exists)
+if(!-f $certificate_request)
+{
+	# open a pipe to openssl req; it reads answers to its interactive
+	# subject prompts from the heredoc below
+	die "Couldn't run openssl for CSR generation" unless
+		open(CSR,"|openssl req -new -key $private_key -sha1 -out $certificate_request");
+	# '.' presumably leaves a subject field blank, with $server supplied
+	# for the Common Name prompt -- NOTE(review): the prompt order depends
+	# on the openssl configuration; confirm if openssl is upgraded
+	print CSR <<__E;
+.
+.
+.
+.
+.
+$server
+.
+.
+.
+
+__E
+	close CSR;
+	print "\n\n";
+	# openssl's exit status isn't available here, so verify the CSR
+	# file actually appeared on disk
+	die "Certificate request wasn't created.\n" unless -f $certificate_request
+}
+
+# write the configuration file
+print "Writing configuration file $config_file\n";
+open CONFIG,">$config_file" or die "Can't open config file for writing";
+print CONFIG <<__E;
+
+RaidFileConf = $raidfile_config
+AccountDatabase = $accounts_file
+
+# Uncomment this line to see exactly what commands are being received from clients.
+# ExtendedLogging = yes
+
+# scan all accounts for files which need deleting every 15 minutes.
+
+TimeBetweenHousekeeping = 900
+
+Server
+{
+	PidFile = /var/run/bbstored.pid
+	User = $username
+	ListenAddresses = inet:$server
+	CertificateFile = $certificate
+	PrivateKeyFile = $private_key
+	TrustedCAsFile = $ca_root_cert
+}
+
+
+__E
+
+# close() flushes perl's output buffer -- check it, so a full disk
+# doesn't silently leave a truncated configuration file behind
+close CONFIG or die "Couldn't write $config_file";
+
+# explain to the user what they need to do next
+# (no argument is needed when the config lives at the default path)
+my $daemon_args = ($config_file eq $default_config_location)?'':" $config_file";
+
+print <<__E;
+
+===================================================================
+
+bbstored basic configuration complete.
+
+What you need to do now...
+
+1) Sign $certificate_request
+	using the bbstored-certs utility.
+
+2) Install the server certificate and root CA certificate as
+	$certificate
+	$ca_root_cert
+
+3) You may wish to read the configuration file
+	$config_file
+	and adjust as appropriate.
+
+4) Create accounts with bbstoreaccounts
+
+5) Start the backup store daemon with the command
+	/usr/local/bin/bbstored$daemon_args
+	in /etc/rc.local, or your local equivalent.
+
+===================================================================
+
+__E
+
+
+
diff --git a/bin/bbstored/bbstored.cpp b/bin/bbstored/bbstored.cpp
new file mode 100755
index 00000000..3eaf2639
--- /dev/null
+++ b/bin/bbstored/bbstored.cpp
@@ -0,0 +1,25 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: bbstored.cpp
+// Purpose: main file for backup store daemon
+// Created: 2003/08/20
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+#include "BackupStoreDaemon.h"
+#include "MainHelper.h"
+
+#include "MemLeakFindOn.h"
+
+// Entry point: construct the backup store daemon and hand control to
+// its Main(), passing the compiled-in default configuration file path
+// plus the raw command line.
+int main(int argc, const char *argv[])
+{
+	// MAINHELPER_START/MAINHELPER_END come from MainHelper.h and
+	// presumably wrap the body in the project's standard startup and
+	// error handling -- see that header to confirm.
+	MAINHELPER_START
+
+	BackupStoreDaemon daemon;
+	return daemon.Main(BOX_FILE_BBSTORED_DEFAULT_CONFIG, argc, argv);
+
+	MAINHELPER_END
+}
+