summaryrefslogtreecommitdiff
path: root/bin/bbackupd
diff options
context:
space:
mode:
Diffstat (limited to 'bin/bbackupd')
-rwxr-xr-xbin/bbackupd/BackupClientContext.cpp453
-rwxr-xr-xbin/bbackupd/BackupClientContext.h156
-rwxr-xr-xbin/bbackupd/BackupClientDeleteList.cpp195
-rwxr-xr-xbin/bbackupd/BackupClientDeleteList.h51
-rwxr-xr-xbin/bbackupd/BackupClientDirectoryRecord.cpp1203
-rwxr-xr-xbin/bbackupd/BackupClientDirectoryRecord.h115
-rwxr-xr-xbin/bbackupd/BackupClientInodeToIDMap.cpp279
-rwxr-xr-xbin/bbackupd/BackupClientInodeToIDMap.h67
-rwxr-xr-xbin/bbackupd/BackupDaemon.cpp1624
-rwxr-xr-xbin/bbackupd/BackupDaemon.h166
-rwxr-xr-xbin/bbackupd/bbackupd-config525
-rwxr-xr-xbin/bbackupd/bbackupd.cpp26
12 files changed, 4860 insertions, 0 deletions
diff --git a/bin/bbackupd/BackupClientContext.cpp b/bin/bbackupd/BackupClientContext.cpp
new file mode 100755
index 00000000..08a203c1
--- /dev/null
+++ b/bin/bbackupd/BackupClientContext.cpp
@@ -0,0 +1,453 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientContext.cpp
+// Purpose: Keep track of context
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <syslog.h>
+
+#include "BoxPortsAndFiles.h"
+#include "BoxTime.h"
+#include "BackupClientContext.h"
+#include "SocketStreamTLS.h"
+#include "Socket.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreException.h"
+#include "BackupDaemon.h"
+#include "autogen_BackupProtocolClient.h"
+
+#include "MemLeakFindOn.h"
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::BackupClientContext(BackupDaemon &, TLSContext &, const std::string &, int32_t, bool)
+// Purpose: Constructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+// Note: all pointer members start null. The socket/protocol connection is
+// created lazily by GetConnection(), the ID maps are injected later via
+// SetIDMaps(), and the exclude lists via SetExcludeLists().
+BackupClientContext::BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLSContext, const std::string &rHostname,
+	int32_t AccountNumber, bool ExtendedLogging)
+	: mrDaemon(rDaemon),
+	  mrTLSContext(rTLSContext),
+	  mHostname(rHostname),
+	  mAccountNumber(AccountNumber),
+	  mpSocket(0),
+	  mpConnection(0),
+	  mExtendedLogging(ExtendedLogging),
+	  mClientStoreMarker(ClientStoreMarker_NotKnown),	// marker chosen on first CloseAnyOpenConnection()
+	  mpDeleteList(0),
+	  mpCurrentIDMap(0),
+	  mpNewIDMap(0),
+	  mStorageLimitExceeded(false),
+	  mpExcludeFiles(0),
+	  mpExcludeDirs(0)
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::~BackupClientContext()
+// Purpose: Destructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupClientContext::~BackupClientContext()
+{
+	// Shut the server connection down cleanly (this also chooses and sets
+	// a client store marker if one isn't known yet).
+	CloseAnyOpenConnection();
+
+	// Delete delete list -- CloseAnyOpenConnection() frees this too, but
+	// be defensive in case a list was created again afterwards.
+	if(mpDeleteList != 0)
+	{
+		delete mpDeleteList;
+		mpDeleteList = 0;
+	}
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetConnection()
+// Purpose: Returns the connection, making the connection and logging into
+// the backup store if necessary.
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupProtocolClient &BackupClientContext::GetConnection()
+{
+	// Already got it? Just return it.
+	if(mpConnection != 0)
+	{
+		return *mpConnection;
+	}
+
+	// Get a socket connection -- reuse an existing socket object if one
+	// was left over from a previous (closed) connection.
+	if(mpSocket == 0)
+	{
+		mpSocket = new SocketStreamTLS;
+		ASSERT(mpSocket != 0);	// will have exceptioned if this was a problem
+	}
+
+	try
+	{
+		// Defensive.
+		if(mpConnection != 0)
+		{
+			delete mpConnection;
+			mpConnection = 0;
+		}
+
+		// Log intention
+		::syslog(LOG_INFO, "Opening connection to server %s...", mHostname.c_str());
+
+		// Connect!
+		mpSocket->Open(mrTLSContext, Socket::TypeINET, mHostname.c_str(), BOX_PORT_BBSTORED);
+
+		// And create a protocol object
+		mpConnection = new BackupProtocolClient(*mpSocket);
+
+		// Set logging option
+		mpConnection->SetLogToSysLog(mExtendedLogging);
+
+		// Handshake
+		mpConnection->Handshake();
+
+		// Check the version of the server -- an exact match is required,
+		// otherwise refuse to talk to it.
+		{
+			std::auto_ptr<BackupProtocolClientVersion> serverVersion(mpConnection->QueryVersion(BACKUP_STORE_SERVER_VERSION));
+			if(serverVersion->GetVersion() != BACKUP_STORE_SERVER_VERSION)
+			{
+				THROW_EXCEPTION(BackupStoreException, WrongServerVersion)
+			}
+		}
+
+		// Login -- if this fails, the Protocol will exception
+		std::auto_ptr<BackupProtocolClientLoginConfirmed> loginConf(mpConnection->QueryLogin(mAccountNumber, 0 /* read/write */));
+
+		// Check that the client store marker is the one we expect. A
+		// mismatch means the store was changed by something other than
+		// this client since the marker was last set.
+		if(mClientStoreMarker != ClientStoreMarker_NotKnown)
+		{
+			if(loginConf->GetClientStoreMarker() != mClientStoreMarker)
+			{
+				// Not good... finish the connection, abort, etc, ignoring errors
+				try
+				{
+					mpConnection->QueryFinished();
+					mpSocket->Shutdown();
+					mpSocket->Close();
+				}
+				catch(...)
+				{
+					// IGNORE
+				}
+
+				// Then throw an exception about this
+				THROW_EXCEPTION(BackupStoreException, ClientMarkerNotAsExpected)
+			}
+		}
+
+		// Log success
+		::syslog(LOG_INFO, "Connection made, login successful");
+
+		// Check to see if there is any space available on the server
+		int64_t softLimit = loginConf->GetBlocksSoftLimit();
+		int64_t hardLimit = loginConf->GetBlocksHardLimit();
+		// Threshold for uploading new stuff: stop uploading once usage is
+		// a third of the way from the soft limit towards the hard limit.
+		int64_t stopUploadThreshold = softLimit + ((hardLimit - softLimit) / 3);
+		if(loginConf->GetBlocksUsed() > stopUploadThreshold)
+		{
+			// no -- flag so only things like deletions happen
+			mStorageLimitExceeded = true;
+			// Log
+			::syslog(LOG_INFO, "Exceeded storage limits on server -- not uploading changes to files");
+		}
+	}
+	catch(...)
+	{
+		// Clean up -- both objects are deleted so the next call starts
+		// from scratch rather than reusing a half-set-up connection.
+		delete mpConnection;
+		mpConnection = 0;
+		delete mpSocket;
+		mpSocket = 0;
+		throw;
+	}
+
+	return *mpConnection;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::CloseAnyOpenConnection()
+// Purpose: Closes a connection, if it's open
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupClientContext::CloseAnyOpenConnection()
+{
+	if(mpConnection)
+	{
+		try
+		{
+			// Need to set a client store marker?
+			if(mClientStoreMarker == ClientStoreMarker_NotKnown)
+			{
+				// Yes, choose one, the current time will do
+				int64_t marker = GetCurrentBoxTime();
+
+				// Set it on the store
+				mpConnection->QuerySetClientStoreMarker(marker);
+
+				// Record it so that it can be picked up later.
+				mClientStoreMarker = marker;
+			}
+
+			// Quit nicely
+			mpConnection->QueryFinished();
+		}
+		catch(...)
+		{
+			// Ignore errors here -- this is best-effort teardown
+		}
+
+		// Delete it anyway.
+		delete mpConnection;
+		mpConnection = 0;
+	}
+
+	if(mpSocket)
+	{
+		try
+		{
+			// Be nice about closing the socket
+			mpSocket->Shutdown();
+			mpSocket->Close();
+		}
+		catch(...)
+		{
+			// Ignore errors
+		}
+
+		// Delete object
+		delete mpSocket;
+		mpSocket = 0;
+	}
+
+	// Delete any pending list -- pending deletions are abandoned, not
+	// performed, once the connection is gone.
+	if(mpDeleteList != 0)
+	{
+		delete mpDeleteList;
+		mpDeleteList = 0;
+	}
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetTimeout()
+// Purpose: Gets the current timeout time.
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+int BackupClientContext::GetTimeout() const
+{
+	// Use the protocol's own timeout when a connection exists; otherwise
+	// fall back to a default of 15 minutes, expressed in milliseconds.
+	return (mpConnection != 0) ? mpConnection->GetTimeout() : (15*60*1000);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetDeleteList()
+// Purpose: Returns the delete list, creating one if necessary
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientDeleteList &BackupClientContext::GetDeleteList()
+{
+	// Create the list lazily, on first request
+	if(!mpDeleteList)
+	{
+		mpDeleteList = new BackupClientDeleteList;
+	}
+
+	return *mpDeleteList;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupClientContext::PerformDeletions()
+//		Purpose: Perform any pending deletions on the store, then
+//			 discard the delete list
+//		Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientContext::PerformDeletions()
+{
+	// Got a list?
+	if(mpDeleteList == 0)
+	{
+		// Nothing to do
+		return;
+	}
+
+	// Delegate to the delete list object
+	mpDeleteList->PerformDeletions(*this);
+
+	// Delete the object -- a fresh list will be created by GetDeleteList()
+	// if more deletions are queued later.
+	delete mpDeleteList;
+	mpDeleteList = 0;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetCurrentIDMap() const
+// Purpose: Return a (const) reference to the current ID map
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+const BackupClientInodeToIDMap &BackupClientContext::GetCurrentIDMap() const
+{
+	// Must have been supplied via SetIDMaps() before this is called
+	ASSERT(mpCurrentIDMap != 0);
+	if(mpCurrentIDMap == 0)
+	{
+		// Release-build safety net: never dereference a null pointer
+		THROW_EXCEPTION(CommonException, Internal)
+	}
+	return *mpCurrentIDMap;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::GetNewIDMap() const
+// Purpose: Return a reference to the new ID map
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientInodeToIDMap &BackupClientContext::GetNewIDMap() const
+{
+	// Must have been supplied via SetIDMaps() before this is called
+	ASSERT(mpNewIDMap != 0);
+	if(mpNewIDMap == 0)
+	{
+		// Release-build safety net: never dereference a null pointer
+		THROW_EXCEPTION(CommonException, Internal)
+	}
+	return *mpNewIDMap;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientContext::FindFilename(int64_t, int64_t, std::string &, bool &) const
+// Purpose: Attempts to find the pathname of an object with a given ID on the server.
+// Returns true if it can be found, in which case rPathOut is the local filename,
+// and rIsDirectoryOut == true if the local object is a directory.
+// Created: 12/11/03
+//
+// --------------------------------------------------------------------------
+bool BackupClientContext::FindFilename(int64_t ObjectID, int64_t ContainingDirectory, std::string &rPathOut, bool &rIsDirectoryOut,
+	bool &rIsCurrentVersionOut, box_time_t *pModTimeOnServer, box_time_t *pAttributesHashOnServer, BackupStoreFilenameClear *pLeafname)
+{
+	// Make a connection to the server (or reuse the existing one)
+	BackupProtocolClient &connection(GetConnection());
+
+	// Request filenames from the server, in a "safe" manner to ignore errors properly
+	// (Send + Receive is used instead of the generated Query call so that an
+	// error reply can be detected by type rather than by exception.)
+	{
+		BackupProtocolClientGetObjectName send(ObjectID, ContainingDirectory);
+		connection.Send(send);
+	}
+	std::auto_ptr<BackupProtocolObjectCl> preply(connection.Receive());
+
+	// Is it of the right type?
+	if(preply->GetType() != BackupProtocolClientObjectName::TypeID)
+	{
+		// Was an error or something
+		return false;
+	}
+
+	// Cast to expected type -- safe, because the TypeID was checked above.
+	BackupProtocolClientObjectName *names = (BackupProtocolClientObjectName *)(preply.get());
+
+	// Anything found?
+	int32_t numElements = names->GetNumNameElements();
+	if(numElements <= 0)
+	{
+		// No.
+		return false;
+	}
+
+	// Get the stream containing all the names
+	std::auto_ptr<IOStream> nameStream(connection.ReceiveStream());
+
+	// Path, built up leaf-first as elements arrive
+	std::string path;
+
+	// Remember this is in reverse order! (leafname first, location last)
+	for(int l = 0; l < numElements; ++l)
+	{
+		BackupStoreFilenameClear elementName;
+		elementName.ReadFromStream(*nameStream, GetTimeout());
+
+		// Store leafname for caller? (first element is the leaf)
+		if(l == 0 && pLeafname)
+		{
+			*pLeafname = elementName;
+		}
+
+		// Is it part of the filename in the location?
+		if(l < (numElements - 1))
+		{
+			// Part of filename within -- prepend to the path so far
+			path = (path.empty())?(elementName.GetClearFilename()):(elementName.GetClearFilename() + DIRECTORY_SEPARATOR_ASCHAR + path);
+		}
+		else
+		{
+			// Location name -- look up in daemon's records
+			std::string locPath;
+			if(!mrDaemon.FindLocationPathName(elementName.GetClearFilename(), locPath))
+			{
+				// Didn't find the location... so can't give the local filename
+				// (NOTE: the name stream has been partially read at this point.)
+				return false;
+			}
+
+			// Add in location path
+			path = (path.empty())?(locPath):(locPath + DIRECTORY_SEPARATOR_ASCHAR + path);
+		}
+	}
+
+	// Is it a directory?
+	rIsDirectoryOut = ((names->GetFlags() & BackupProtocolClientListDirectory::Flags_Dir) == BackupProtocolClientListDirectory::Flags_Dir);
+
+	// Is it the current version? (i.e. neither an old version nor deleted)
+	rIsCurrentVersionOut = ((names->GetFlags() & (BackupProtocolClientListDirectory::Flags_OldVersion | BackupProtocolClientListDirectory::Flags_Deleted)) == 0);
+
+	// And other information which may be required (optional out-params)
+	if(pModTimeOnServer) *pModTimeOnServer = names->GetModificationTime();
+	if(pAttributesHashOnServer) *pAttributesHashOnServer = names->GetAttributesHash();
+
+	// Tell caller about the pathname
+	rPathOut = path;
+
+	// Found
+	return true;
+}
+
+
diff --git a/bin/bbackupd/BackupClientContext.h b/bin/bbackupd/BackupClientContext.h
new file mode 100755
index 00000000..3933dbed
--- /dev/null
+++ b/bin/bbackupd/BackupClientContext.h
@@ -0,0 +1,156 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientContext.h
+// Purpose: Keep track of context
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTCONTEXT__H
+#define BACKUPCLIENTCONTEXT__H
+
+#include "BoxTime.h"
+#include "BackupClientDeleteList.h"
+#include "ExcludeList.h"
+
+class TLSContext;
+class BackupProtocolClient;
+class SocketStreamTLS;
+class BackupClientInodeToIDMap;
+class BackupDaemon;
+class BackupStoreFilenameClear;
+
+#include <string>
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupClientContext
+// Purpose:
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+class BackupClientContext
+{
+public:
+	BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLSContext, const std::string &rHostname,
+		int32_t AccountNumber, bool ExtendedLogging);
+	~BackupClientContext();
+private:
+	// Copying is not allowed (not implemented)
+	BackupClientContext(const BackupClientContext &);
+public:
+
+	// Returns the server connection, connecting and logging in if necessary
+	BackupProtocolClient &GetConnection();
+
+	// Closes any open connection, setting a client store marker first if needed
+	void CloseAnyOpenConnection();
+
+	// Timeout (ms) to use for network operations
+	int GetTimeout() const;
+
+	// Pending deletions, created lazily and performed on demand
+	BackupClientDeleteList &GetDeleteList();
+	void PerformDeletions();
+
+	enum
+	{
+		// Sentinel: no client store marker has been seen or chosen yet
+		ClientStoreMarker_NotKnown = 0
+	};
+
+	void SetClientStoreMarker(int64_t ClientStoreMarker) {mClientStoreMarker = ClientStoreMarker;}
+	int64_t GetClientStoreMarker() const {return mClientStoreMarker;}
+
+	// True if the login reply showed usage above the upload threshold
+	bool StorageLimitExceeded() {return mStorageLimitExceeded;}
+
+	// --------------------------------------------------------------------------
+	//
+	// Function
+	//		Name:    BackupClientContext::SetIDMaps(const BackupClientInodeToIDMap *, BackupClientInodeToIDMap *)
+	//		Purpose: Store pointers to the Current and New ID maps
+	//		Created: 11/11/03
+	//
+	// --------------------------------------------------------------------------
+	void SetIDMaps(const BackupClientInodeToIDMap *pCurrent, BackupClientInodeToIDMap *pNew)
+	{
+		ASSERT(pCurrent != 0);
+		ASSERT(pNew != 0);
+		mpCurrentIDMap = pCurrent;
+		mpNewIDMap = pNew;
+	}
+	const BackupClientInodeToIDMap &GetCurrentIDMap() const;
+	BackupClientInodeToIDMap &GetNewIDMap() const;
+
+
+	// --------------------------------------------------------------------------
+	//
+	// Function
+	//		Name:    BackupClientContext::SetExcludeLists(ExcludeList *, ExcludeList *)
+	//		Purpose: Sets the exclude lists for the operation. Can be 0.
+	//		Created: 28/1/04
+	//
+	// --------------------------------------------------------------------------
+	void SetExcludeLists(ExcludeList *pExcludeFiles, ExcludeList *pExcludeDirs)
+	{
+		mpExcludeFiles = pExcludeFiles;
+		mpExcludeDirs = pExcludeDirs;
+	}
+
+	// --------------------------------------------------------------------------
+	//
+	// Function
+	//		Name:    BackupClientContext::ExcludeFile(const std::string &)
+	//		Purpose: Returns true if this file should be excluded from the backup
+	//		Created: 28/1/04
+	//
+	// --------------------------------------------------------------------------
+	inline bool ExcludeFile(const std::string &rFullFilename)
+	{
+		if(mpExcludeFiles != 0)
+		{
+			return mpExcludeFiles->IsExcluded(rFullFilename);
+		}
+		// If no list, don't exclude anything
+		return false;
+	}
+
+	// --------------------------------------------------------------------------
+	//
+	// Function
+	//		Name:    BackupClientContext::ExcludeDir(const std::string &)
+	//		Purpose: Returns true if this directory should be excluded from the backup
+	//		Created: 28/1/04
+	//
+	// --------------------------------------------------------------------------
+	inline bool ExcludeDir(const std::string &rFullDirName)
+	{
+		if(mpExcludeDirs != 0)
+		{
+			return mpExcludeDirs->IsExcluded(rFullDirName);
+		}
+		// If no list, don't exclude anything
+		return false;
+	}
+
+	// Utility functions -- may do a lot of work
+	bool FindFilename(int64_t ObjectID, int64_t ContainingDirectory, std::string &rPathOut, bool &rIsDirectoryOut,
+		bool &rIsCurrentVersionOut, box_time_t *pModTimeOnServer = 0, box_time_t *pAttributesHashOnServer = 0,
+		BackupStoreFilenameClear *pLeafname = 0); // not const as may connect to server
+
+private:
+	BackupDaemon &mrDaemon;				// owning daemon; used to resolve location names to paths
+	TLSContext &mrTLSContext;			// TLS parameters for connecting to the store
+	std::string mHostname;				// server to connect to
+	int32_t mAccountNumber;				// account to log in as
+	SocketStreamTLS *mpSocket;			// owned; 0 when no connection
+	BackupProtocolClient *mpConnection;		// owned; 0 when no connection
+	bool mExtendedLogging;				// log protocol traffic to syslog
+	int64_t mClientStoreMarker;			// ClientStoreMarker_NotKnown until seen/chosen
+	BackupClientDeleteList *mpDeleteList;		// owned; created lazily
+	const BackupClientInodeToIDMap *mpCurrentIDMap;	// not owned; set via SetIDMaps()
+	BackupClientInodeToIDMap *mpNewIDMap;		// not owned; set via SetIDMaps()
+	bool mStorageLimitExceeded;			// set by GetConnection() from login reply
+	ExcludeList *mpExcludeFiles;			// not owned; may be 0
+	ExcludeList *mpExcludeDirs;			// not owned; may be 0
+};
+
+
+#endif // BACKUPCLIENTCONTEXT__H
+
diff --git a/bin/bbackupd/BackupClientDeleteList.cpp b/bin/bbackupd/BackupClientDeleteList.cpp
new file mode 100755
index 00000000..f6d8e0dc
--- /dev/null
+++ b/bin/bbackupd/BackupClientDeleteList.cpp
@@ -0,0 +1,195 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientDeleteList.cpp
+// Purpose: List of pending deletes for backup
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <algorithm>
+
+#include "BackupClientDeleteList.h"
+#include "BackupClientContext.h"
+#include "autogen_BackupProtocolClient.h"
+
+#include "MemLeakFindOn.h"
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::BackupClientDeleteList()
+// Purpose: Constructor
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientDeleteList::BackupClientDeleteList()
+{
+	// Nothing to do -- all the lists start out empty
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::~BackupClientDeleteList()
+// Purpose: Destructor
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientDeleteList::~BackupClientDeleteList()
+{
+	// Nothing to do -- the containers clean themselves up
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::AddDirectoryDelete(int64_t)
+// Purpose: Add a directory to the list of directories to be deleted.
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID)
+{
+	// Directories on the "no delete" list must never be queued for deletion
+	bool deletionAllowed = (mDirectoryNoDeleteList.find(ObjectID) == mDirectoryNoDeleteList.end());
+	if(deletionAllowed)
+	{
+		mDirectoryList.push_back(ObjectID);
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::AddFileDelete(int64_t, BackupStoreFilenameClear &)
+// Purpose:
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+{
+	// Queue the file for deletion, unless it appears in the "no delete" list
+	const std::pair<int64_t, BackupStoreFilename> entry(DirectoryID, rFilename);
+	if(std::find(mFileNoDeleteList.begin(), mFileNoDeleteList.end(), entry)
+		== mFileNoDeleteList.end())
+	{
+		mFileList.push_back(entry);
+	}
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
+// Purpose: Perform all the pending deletes
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+// --------------------------------------------------------------------------
+// Performs all queued directory and file deletions over the store
+// connection, then empties both queues. Emptying the file queue (missing in
+// the original) makes a repeat call a no-op instead of re-sending the same
+// file deletions -- matching how the directory queue was already handled.
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
+{
+	// Anything to do?
+	if(mDirectoryList.empty() && mFileList.empty())
+	{
+		// Nothing!
+		return;
+	}
+
+	// Get a connection
+	BackupProtocolClient &connection(rContext.GetConnection());
+
+	// Do the deletes
+	for(std::vector<int64_t>::iterator i(mDirectoryList.begin()); i != mDirectoryList.end(); ++i)
+	{
+		connection.QueryDeleteDirectory(*i);
+	}
+
+	// Clear the directory list
+	mDirectoryList.clear();
+
+	// Delete the files
+	for(std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator i(mFileList.begin()); i != mFileList.end(); ++i)
+	{
+		connection.QueryDeleteFile(i->first, i->second);
+	}
+
+	// Clear the file list too, so calling this again doesn't re-send the
+	// same deletions.
+	mFileList.clear();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::StopDirectoryDeletion(int64_t)
+// Purpose: Stop a directory being deleted
+// Created: 19/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
+{
+	// First of all, is it in the delete vector?
+	std::vector<int64_t>::iterator delEntry(std::find(mDirectoryList.begin(), mDirectoryList.end(), ObjectID));
+	if(delEntry != mDirectoryList.end())
+	{
+		// erase this entry -- the queued deletion is cancelled
+		mDirectoryList.erase(delEntry);
+	}
+	else
+	{
+		// Haven't been asked to delete it yet, put it in the no delete list
+		// so any future AddDirectoryDelete() for this ID is ignored
+		mDirectoryNoDeleteList.insert(ObjectID);
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDeleteList::StopFileDeletion(int64_t, const BackupStoreFilename &)
+// Purpose: Stop a file from being deleted
+// Created: 19/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+{
+	// Is this file already queued for deletion?
+	const std::pair<int64_t, BackupStoreFilename> entry(DirectoryID, rFilename);
+	std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator found(
+		std::find(mFileList.begin(), mFileList.end(), entry));
+
+	if(found != mFileList.end())
+	{
+		// Yes -- remove it from the queue, cancelling the deletion
+		mFileList.erase(found);
+	}
+	else
+	{
+		// Not queued yet; record it so any later AddFileDelete() for this
+		// file is ignored
+		mFileNoDeleteList.push_back(entry);
+	}
+}
+
+
+
+
+
diff --git a/bin/bbackupd/BackupClientDeleteList.h b/bin/bbackupd/BackupClientDeleteList.h
new file mode 100755
index 00000000..5940cf50
--- /dev/null
+++ b/bin/bbackupd/BackupClientDeleteList.h
@@ -0,0 +1,51 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientDeleteList.h
+// Purpose: List of pending deletes for backup
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTDELETELIST__H
+#define BACKUPCLIENTDELETELIST__H
+
+#include "BackupStoreFilename.h"
+
+class BackupClientContext;
+
+#include <vector>
+#include <utility>
+#include <set>
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupClientDeleteList
+// Purpose: List of pending deletes for backup
+// Created: 10/11/03
+//
+// --------------------------------------------------------------------------
+class BackupClientDeleteList
+{
+public:
+	BackupClientDeleteList();
+	~BackupClientDeleteList();
+
+	// Queue a directory / file for deletion (ignored if it is on the
+	// corresponding "no delete" list)
+	void AddDirectoryDelete(int64_t ObjectID);
+	void AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename);
+
+	// Cancel a queued deletion, or pre-emptively block a future one
+	void StopDirectoryDeletion(int64_t ObjectID);
+	void StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename);
+
+	// Send all queued deletions over the context's store connection
+	void PerformDeletions(BackupClientContext &rContext);
+
+private:
+	std::vector<int64_t> mDirectoryList;			// directory object IDs queued for deletion
+	std::set<int64_t> mDirectoryNoDeleteList; // note: things only get in this list if they're not present in mDirectoryList when they are 'added'
+	std::vector<std::pair<int64_t, BackupStoreFilename> > mFileList;	// (directory ID, filename) pairs queued for deletion
+	std::vector<std::pair<int64_t, BackupStoreFilename> > mFileNoDeleteList;	// files protected from deletion
+};
+
+#endif // BACKUPCLIENTDELETELIST__H
+
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.cpp b/bin/bbackupd/BackupClientDirectoryRecord.cpp
new file mode 100755
index 00000000..eb4a8343
--- /dev/null
+++ b/bin/bbackupd/BackupClientDirectoryRecord.cpp
@@ -0,0 +1,1203 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientDirectoryRecord.cpp
+// Purpose: Implementation of record about directory for backup client
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <string.h>
+
+#include "BackupClientDirectoryRecord.h"
+#include "autogen_BackupProtocolClient.h"
+#include "BackupClientContext.h"
+#include "IOStream.h"
+#include "MemBlockStream.h"
+#include "CommonException.h"
+#include "CollectInBufferStream.h"
+#include "BackupStoreFile.h"
+#include "BackupClientInodeToIDMap.h"
+#include "FileModificationTime.h"
+#include "BackupDaemon.h"
+#include "BackupStoreException.h"
+
+#ifdef PLATFORM_LINUX
+ #include "LinuxWorkaround.h"
+#endif
+
+#include "MemLeakFindOn.h"
+
+typedef std::map<std::string, BackupStoreDirectory::Entry *> DecryptedEntriesMap_t;
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::BackupClientDirectoryRecord()
+// Purpose: Constructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::BackupClientDirectoryRecord(int64_t ObjectID, const std::string &rSubDirName)
+ : mObjectID(ObjectID),
+ mSubDirName(rSubDirName),
+ mInitialSyncDone(false),
+ mSyncDone(false),
+ mpPendingEntries(0)
+{
+ ::memset(mStateChecksum, 0, sizeof(mStateChecksum));
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::~BackupClientDirectoryRecord()
+// Purpose: Destructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::~BackupClientDirectoryRecord()
+{
+ // Make deletion recursive
+ DeleteSubDirectories();
+
+ // Delete maps
+ if(mpPendingEntries != 0)
+ {
+ delete mpPendingEntries;
+ mpPendingEntries = 0;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::DeleteSubDirectories();
+// Purpose: Delete all sub directory entries
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::DeleteSubDirectories()
+{
+ // Delete all pointers
+ for(std::map<std::string, BackupClientDirectoryRecord *>::iterator i = mSubDirectories.begin();
+ i != mSubDirectories.end(); ++i)
+ {
+ delete i->second;
+ }
+
+ // Empty list
+ mSubDirectories.clear();
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &, int64_t, const std::string &, bool)
+// Purpose: Synchronise, recursively, a local directory with the server.
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &rParams, int64_t ContainingDirectoryID,
+ const std::string &rLocalPath, bool ThisDirHasJustBeenCreated)
+{
+ // Signal received by daemon?
+ if(rParams.mrDaemon.StopRun())
+ {
+ // Yes. Stop now.
+ THROW_EXCEPTION(BackupStoreException, SignalReceived)
+ }
+
+ // Start by making some flag changes, marking this sync as not done,
+ // and on the immediate sub directories.
+ mSyncDone = false;
+ for(std::map<std::string, BackupClientDirectoryRecord *>::iterator i = mSubDirectories.begin();
+ i != mSubDirectories.end(); ++i)
+ {
+ i->second->mSyncDone = false;
+ }
+
+ // Work out the time in the future after which the file should be uploaded regardless.
+ // This is a simple way to avoid having too many problems with file servers when they have
+ // clients with badly out of sync clocks.
+ rParams.mUploadAfterThisTimeInTheFuture = GetCurrentBoxTime() + rParams.mMaxFileTimeInFuture;
+
+ // Build the current state checksum to compare against while getting info from dirs
+ // Note checksum is used locally only, so byte order isn't considered.
+ MD5Digest currentStateChecksum;
+
+ // Stat the directory, to get attribute info
+ {
+ struct stat st;
+ if(::stat(rLocalPath.c_str(), &st) != 0)
+ {
+ // The directory has probably been deleted, so just ignore this error.
+ // In a future scan, this deletion will be noticed, deleted from server, and this object deleted.
+ TRACE1("Stat failed for '%s' (directory)\n", rLocalPath.c_str());
+ return;
+ }
+ // Store inode number in map so directories are tracked in case they're renamed
+ {
+ BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
+ idMap.AddToMap(st.st_ino, mObjectID, ContainingDirectoryID);
+ }
+ // Add attributes to checksum
+ currentStateChecksum.Add(&st.st_mode, sizeof(st.st_mode));
+ currentStateChecksum.Add(&st.st_uid, sizeof(st.st_uid));
+ currentStateChecksum.Add(&st.st_gid, sizeof(st.st_gid));
+ // Inode to be paranoid about things moving around
+ currentStateChecksum.Add(&st.st_ino, sizeof(st.st_ino));
+#ifndef PLATFORM_stat_NO_st_flags
+ currentStateChecksum.Add(&st.st_flags, sizeof(st.st_flags));
+#endif // n PLATFORM_stat_NO_st_flags
+ }
+
+ // Read directory entries, building arrays of names
+ // First, need to read the contents of the directory.
+ std::vector<std::string> dirs;
+ std::vector<std::string> files;
+ bool downloadDirectoryRecordBecauseOfFutureFiles = false;
+ // BLOCK
+ {
+ // read the contents...
+ DIR *dirHandle = 0;
+ try
+ {
+ dirHandle = ::opendir(rLocalPath.c_str());
+ if(dirHandle == 0)
+ {
+ // Report the error (logs and eventual email to administrator)
+ SetErrorWhenReadingFilesystemObject(rParams, rLocalPath.c_str());
+ // Ignore this directory for now.
+ return;
+ }
+
+ // Basic structure for checksum info
+ struct {
+ box_time_t mModificationTime;
+ box_time_t mAttributeModificationTime;
+ int64_t mSize;
+ // And then the name follows
+ } checksum_info;
+ // Be paranoid about structure packing
+ ::memset(&checksum_info, 0, sizeof(checksum_info));
+
+ struct dirent *en = 0;
+ struct stat st;
+ std::string filename;
+ while((en = ::readdir(dirHandle)) != 0)
+ {
+ // Don't need to use LinuxWorkaround_FinishDirentStruct(en, rLocalPath.c_str());
+ // on Linux, as a stat is performed to get all this info
+
+ if(en->d_name[0] == '.' &&
+ (en->d_name[1] == '\0' || (en->d_name[1] == '.' && en->d_name[2] == '\0')))
+ {
+ // ignore, it's . or ..
+ continue;
+ }
+
+ // Stat file to get info
+ filename = rLocalPath + DIRECTORY_SEPARATOR + en->d_name;
+ if(::lstat(filename.c_str(), &st) != 0)
+ {
+ TRACE1("Stat failed for '%s' (contents)\n", filename.c_str());
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+
+ int type = st.st_mode & S_IFMT;
+ if(type == S_IFREG || type == S_IFLNK)
+ {
+ // File or symbolic link
+
+ // Exclude it?
+ if(rParams.mrContext.ExcludeFile(filename))
+ {
+ // Next item!
+ continue;
+ }
+
+ // Store on list
+ files.push_back(std::string(en->d_name));
+ }
+ else if(type == S_IFDIR)
+ {
+ // Directory
+
+ // Exclude it?
+ if(rParams.mrContext.ExcludeDir(filename))
+ {
+ // Next item!
+ continue;
+ }
+
+ // Store on list
+ dirs.push_back(std::string(en->d_name));
+ }
+ else
+ {
+ continue;
+ }
+
+ // Here if the object is something to back up (file, symlink or dir, not excluded)
+ // So make the information for adding to the checksum
+ checksum_info.mModificationTime = FileModificationTime(st);
+ checksum_info.mAttributeModificationTime = FileAttrModificationTime(st);
+ checksum_info.mSize = st.st_size;
+ currentStateChecksum.Add(&checksum_info, sizeof(checksum_info));
+ currentStateChecksum.Add(en->d_name, strlen(en->d_name));
+
+ // If the file has been modified madly into the future, download the
+ // directory record anyway to ensure that it doesn't get uploaded
+ // every single time the disc is scanned.
+ if(checksum_info.mModificationTime > rParams.mUploadAfterThisTimeInTheFuture)
+ {
+ downloadDirectoryRecordBecauseOfFutureFiles = true;
+ // Log that this has happened
+ if(!rParams.mHaveLoggedWarningAboutFutureFileTimes)
+ {
+ ::syslog(LOG_ERR, "Some files have modification times excessively in the future. Check clock syncronisation.\n");
+ ::syslog(LOG_ERR, "Example file (only one shown) : %s\n", filename.c_str());
+ rParams.mHaveLoggedWarningAboutFutureFileTimes = true;
+ }
+ }
+ }
+
+ if(::closedir(dirHandle) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+ dirHandle = 0;
+ }
+ catch(...)
+ {
+ if(dirHandle != 0)
+ {
+ ::closedir(dirHandle);
+ }
+ throw;
+ }
+ }
+
+ // Finish off the checksum, and compare with the one currently stored
+ bool checksumDifferent = true;
+ currentStateChecksum.Finish();
+ if(mInitialSyncDone && currentStateChecksum.DigestMatches(mStateChecksum))
+ {
+ // The checksum is the same, and there was one to compare with
+ checksumDifferent = false;
+ }
+
+ // Pointer to potentially downloaded store directory info
+ BackupStoreDirectory *pdirOnStore = 0;
+
+ try
+ {
+ // Want to get the directory listing?
+ if(ThisDirHasJustBeenCreated)
+ {
+ // Avoid sending another command to the server when we know it's empty
+ pdirOnStore = new BackupStoreDirectory(mObjectID, ContainingDirectoryID);
+ }
+ else
+ {
+ // Consider asking the store for it
+ if(!mInitialSyncDone || checksumDifferent || downloadDirectoryRecordBecauseOfFutureFiles)
+ {
+ pdirOnStore = FetchDirectoryListing(rParams);
+ }
+ }
+
+ // Make sure the attributes are up to date -- if there's space on the server
+ // and this directory has not just been created (because its attributes will be correct in this case)
+ // and the checksum is different, implying they *MIGHT* be different.
+ if((!ThisDirHasJustBeenCreated) && checksumDifferent && (!rParams.mrContext.StorageLimitExceeded()))
+ {
+ UpdateAttributes(rParams, pdirOnStore, rLocalPath);
+ }
+
+ // Create the list of pointers to directory entries
+ std::vector<BackupStoreDirectory::Entry *> entriesLeftOver;
+ if(pdirOnStore)
+ {
+ entriesLeftOver.resize(pdirOnStore->GetNumberOfEntries(), 0);
+ BackupStoreDirectory::Iterator i(*pdirOnStore);
+ // Copy in pointers to all the entries
+ for(unsigned int l = 0; l < pdirOnStore->GetNumberOfEntries(); ++l)
+ {
+ entriesLeftOver[l] = i.Next();
+ }
+ }
+
+ // Do the directory reading
+ bool updateCompleteSuccess = UpdateItems(rParams, rLocalPath, pdirOnStore, entriesLeftOver, files, dirs);
+
+ // LAST THING! (think exception safety)
+ // Store the new checksum -- don't fetch things unnecessarily in the future
+ // But... only if 1) the storage limit isn't exceeded -- make sure things are done again if
+ // the directory is modified later
+ // and 2) All the objects within the directory were stored successfully.
+ if(!rParams.mrContext.StorageLimitExceeded() && updateCompleteSuccess)
+ {
+ currentStateChecksum.CopyDigestTo(mStateChecksum);
+ }
+ }
+ catch(...)
+ {
+ // Bad things have happened -- clean up
+ if(pdirOnStore != 0)
+ {
+ delete pdirOnStore;
+ pdirOnStore = 0;
+ }
+
+ // Set things so that we get a full go at stuff later
+ ::memset(mStateChecksum, 0, sizeof(mStateChecksum));
+
+ throw;
+ }
+
+ // Clean up directory on store
+ if(pdirOnStore != 0)
+ {
+ delete pdirOnStore;
+ pdirOnStore = 0;
+ }
+
+ // Flag things as having happened.
+ mInitialSyncDone = true;
+ mSyncDone = true;
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::FetchDirectoryListing(BackupClientDirectoryRecord::SyncParams &)
+// Purpose: Fetch the directory listing of this directory from the store.
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+BackupStoreDirectory *BackupClientDirectoryRecord::FetchDirectoryListing(BackupClientDirectoryRecord::SyncParams &rParams)
+{
+ BackupStoreDirectory *pdir = 0;
+
+ try
+ {
+ // Get connection to store
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Query the directory
+ std::auto_ptr<BackupProtocolClientSuccess> dirreply(connection.QueryListDirectory(
+ mObjectID,
+ BackupProtocolClientListDirectory::Flags_INCLUDE_EVERYTHING, // both files and directories
+ BackupProtocolClientListDirectory::Flags_Deleted | BackupProtocolClientListDirectory::Flags_OldVersion, // exclude old/deleted stuff
+ true /* want attributes */));
+
+ // Retrieve the directory from the stream following
+ pdir = new BackupStoreDirectory;
+ ASSERT(pdir != 0);
+ std::auto_ptr<IOStream> dirstream(connection.ReceiveStream());
+ pdir->ReadFromStream(*dirstream, connection.GetTimeout());
+ }
+ catch(...)
+ {
+ delete pdir;
+ pdir = 0;
+ throw;
+ }
+
+ return pdir;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::SyncParams &, const std::string &)
+// Purpose: Sets the attributes of the directory on the store, if necessary
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::SyncParams &rParams, BackupStoreDirectory *pDirOnStore, const std::string &rLocalPath)
+{
+ // Get attributes for the directory
+ BackupClientFileAttributes attr;
+ box_time_t attrModTime = 0;
+ attr.ReadAttributes(rLocalPath.c_str(), true /* directories have zero mod times */,
+ 0 /* no modification time */, &attrModTime);
+
+ // Assume attributes need updating, unless proved otherwise
+ bool updateAttr = true;
+
+ // Got a listing to compare with?
+ ASSERT(pDirOnStore == 0 || (pDirOnStore != 0 && pDirOnStore->HasAttributes()));
+ if(pDirOnStore != 0 && pDirOnStore->HasAttributes())
+ {
+ const StreamableMemBlock &storeAttrEnc(pDirOnStore->GetAttributes());
+ // Explicit decryption
+ BackupClientFileAttributes storeAttr(storeAttrEnc);
+ // Compare the attributes
+ if(attr.Compare(storeAttr, true, true /* ignore both modification times */))
+ {
+ // No update necessary
+ updateAttr = false;
+ }
+ }
+
+ // Update them?
+ if(updateAttr)
+ {
+ // Get connection to store
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Exception thrown if this doesn't work
+ MemBlockStream attrStream(attr);
+ connection.QueryChangeDirAttributes(mObjectID, attrModTime, attrStream);
+ }
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncParams &, const std::string &, BackupStoreDirectory *, std::vector<BackupStoreDirectory::Entry *> &)
+// Purpose: Update the items stored on the server. The rFiles vector will be erased after it's used to save space.
+// Returns true if all items were updated successfully. (If not, the failures will have been logged).
+// Created: 2003/10/09
+//
+// --------------------------------------------------------------------------
+bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncParams &rParams,
+ const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+ std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
+ std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs)
+{
+ bool allUpdatedSuccessfully = true;
+
+ // Decrypt all the directory entries.
+ // It would be nice to be able to just compare the encrypted versions, however this doesn't work
+ // in practise because there can be multiple encodings of the same filename using different
+ // methods (although each method will result in the same string for the same filename.) This
+ // happens when the server fixes a broken store, and gives plain text generated filenames.
+ // So if we didn't do things like this, then you wouldn't be able to recover from bad things
+ // happening with the server.
+ DecryptedEntriesMap_t decryptedEntries;
+ if(pDirOnStore != 0)
+ {
+ BackupStoreDirectory::Iterator i(*pDirOnStore);
+ BackupStoreDirectory::Entry *en = 0;
+ while((en = i.Next()) != 0)
+ {
+ decryptedEntries[BackupStoreFilenameClear(en->GetName()).GetClearFilename()] = en;
+ }
+ }
+
+ // Do files
+ for(std::vector<std::string>::const_iterator f = rFiles.begin();
+ f != rFiles.end(); ++f)
+ {
+ // Filename of this file
+ std::string filename(rLocalPath + DIRECTORY_SEPARATOR + *f);
+
+ // Get relevant info about file
+ box_time_t modTime = 0;
+ uint64_t attributesHash = 0;
+ int64_t fileSize = 0;
+ ino_t inodeNum = 0;
+ bool hasMultipleHardLinks = true;
+ // BLOCK
+ {
+ // Stat the file
+ struct stat st;
+ if(::lstat(filename.c_str(), &st) != 0)
+ {
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+
+ // Extract required data
+ modTime = FileModificationTime(st);
+ fileSize = st.st_size;
+ inodeNum = st.st_ino;
+ hasMultipleHardLinks = (st.st_nlink > 1);
+ attributesHash = BackupClientFileAttributes::GenerateAttributeHash(st, *f);
+ }
+
+ // See if it's in the listing (if we have one)
+ BackupStoreFilenameClear storeFilename(*f);
+ BackupStoreDirectory::Entry *en = 0;
+ int64_t latestObjectID = 0;
+ if(pDirOnStore != 0)
+ {
+ DecryptedEntriesMap_t::iterator i(decryptedEntries.find(*f));
+ if(i != decryptedEntries.end())
+ {
+ en = i->second;
+ latestObjectID = en->GetObjectID();
+ }
+ }
+
+ // Check that the entry which might have been found is in fact a file
+ if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) == 0))
+ {
+ // Directory exists in the place of this file -- sort it out
+ RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore, en->GetObjectID(), *f);
+ en = 0;
+ }
+
+ // Check for renaming?
+ if(pDirOnStore != 0 && en == 0)
+ {
+ // We now know...
+ // 1) File has just been added
+ // 2) It's not in the store
+
+ // Do we know about the inode number?
+ const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
+ int64_t renameObjectID = 0, renameInDirectory = 0;
+ if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
+ {
+ // Look up on the server to get the name, to build the local filename
+ std::string localPotentialOldName;
+ bool isDir = false;
+ bool isCurrentVersion = false;
+ box_time_t srvModTime = 0, srvAttributesHash = 0;
+ BackupStoreFilenameClear oldLeafname;
+ if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion, &srvModTime, &srvAttributesHash, &oldLeafname))
+ {
+ // Only interested if it's a file and the latest version
+ if(!isDir && isCurrentVersion)
+ {
+ // Check that the object we found in the ID map doesn't exist on disc
+ struct stat st;
+ if(::stat(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
+ {
+ // Doesn't exist locally, but does exist on the server.
+ // Therefore we can safely rename it to this new file.
+
+ // Get the connection to the server
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Only do this step if there is room on the server.
+ // This step will be repeated later when there is space available
+ if(!rParams.mrContext.StorageLimitExceeded())
+ {
+ // Rename the existing files (ie include old versions) on the server
+ connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
+ BackupProtocolClientMoveObject::Flags_MoveAllWithSameName | BackupProtocolClientMoveObject::Flags_AllowMoveOverDeletedObject,
+ storeFilename);
+
+ // Stop the attempt to delete the file in the original location
+ BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ rdelList.StopFileDeletion(renameInDirectory, oldLeafname);
+
+ // Create new entry in the directory for it
+ // -- will be near enough what's actually on the server for the rest to work.
+ en = pDirOnStore->AddEntry(storeFilename, srvModTime, renameObjectID, 0 /* size in blocks unknown, but not needed */,
+ BackupStoreDirectory::Entry::Flags_File, srvAttributesHash);
+
+ // Store the object ID for the inode lookup map later
+ latestObjectID = renameObjectID;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Is it in the mPendingEntries list?
+ box_time_t pendingFirstSeenTime = 0; // ie not seen
+ if(mpPendingEntries != 0)
+ {
+ std::map<std::string, box_time_t>::const_iterator i(mpPendingEntries->find(*f));
+ if(i != mpPendingEntries->end())
+ {
+ // found it -- set flag
+ pendingFirstSeenTime = i->second;
+ }
+ }
+
+ // If pDirOnStore == 0, then this must have been after an initial sync:
+ ASSERT(pDirOnStore != 0 || mInitialSyncDone);
+ // So, if pDirOnStore == 0, then we know that everything before syncPeriodStart
+ // is either on the server, or in the toupload list. If the directory had changed,
+ // we'd have got a directory listing.
+ //
+ // At this point, if (pDirOnStore == 0 && en == 0), we can assume it's on the server with a
+ // mod time < syncPeriodStart, or didn't exist before that time.
+ //
+ // But if en != 0, then we need to compare modification times to avoid uploading it again.
+
+ // Need to update?
+ //
+ // Condition for upload:
+ // modification time within sync period
+ // if it's been seen before but not uploaded, is the time from this first sight longer than the MaxUploadWait
+ // and if we know about it from a directory listing, that it hasn't got the same upload time as on the store
+ if(
+ (
+ // Check the file modified within the acceptable time period we're checking
+ // If the file isn't on the server, the acceptable time starts at zero.
+ // Check pDirOnStore and en, because if we didn't download a directory listing,
+ // pDirOnStore will be zero, but we know it's on the server.
+ ( ((pDirOnStore != 0 && en == 0) || (modTime >= rParams.mSyncPeriodStart)) && modTime < rParams.mSyncPeriodEnd)
+
+ // However, just in case things are continually modified, we check the first seen time.
+ // The two compares of syncPeriodEnd and pendingFirstSeenTime are because the values are unsigned.
+ || (pendingFirstSeenTime != 0 &&
+ (rParams.mSyncPeriodEnd > pendingFirstSeenTime)
+ && ((rParams.mSyncPeriodEnd - pendingFirstSeenTime) > rParams.mMaxUploadWait))
+
+ // Then make sure that if files are added with a time less than the sync period start
+ // (which can easily happen on file server), it gets uploaded. The directory contents checksum
+ // will pick up the fact it has been added, so the store listing will be available when this happens.
+ || ((modTime <= rParams.mSyncPeriodStart) && (en != 0) && (en->GetModificationTime() != modTime))
+
+ // And just to catch really badly off clocks in the future for file server clients,
+ // just upload the file if it's madly in the future.
+ || (modTime > rParams.mUploadAfterThisTimeInTheFuture)
+ )
+ // But even then, only upload it if the mod time locally is different to that on the server.
+ && (en == 0 || en->GetModificationTime() != modTime))
+ {
+ // Make sure we're connected -- must connect here so we know whether
+ // the storage limit has been exceeded, and hence whether or not
+ // to actually upload the file.
+ rParams.mrContext.GetConnection();
+
+ // Only do this step if there is room on the server.
+ // This step will be repeated later when there is space available
+ if(!rParams.mrContext.StorageLimitExceeded())
+ {
+ // Upload the file to the server, recording the object ID it returns
+ bool noPreviousVersionOnServer = ((pDirOnStore != 0) && (en == 0));
+
+ // Surround this in a try/catch block, to catch errors, but still continue
+ bool uploadSuccess = false;
+ try
+ {
+ latestObjectID = UploadFile(rParams, filename, storeFilename, fileSize, modTime, attributesHash, noPreviousVersionOnServer);
+ uploadSuccess = true;
+ }
+ catch(ConnectionException &e)
+ {
+ // Connection errors should just be passed on to the main handler, retries
+ // would probably just cause more problems.
+ throw;
+ }
+ catch(BoxException &e)
+ {
+ // an error occurred -- make return code false, to show error in directory
+ allUpdatedSuccessfully = false;
+ // Log it.
+ SetErrorWhenReadingFilesystemObject(rParams, filename.c_str());
+ // Log error.
+ ::syslog(LOG_ERR, "Error code when uploading was (%d/%d), %s", e.GetType(), e.GetSubType(), e.what());
+ }
+
+ // Update structures if the file was uploaded successfully.
+ if(uploadSuccess)
+ {
+ // delete from pending entries
+ if(pendingFirstSeenTime != 0 && mpPendingEntries != 0)
+ {
+ mpPendingEntries->erase(*f);
+ }
+ }
+ }
+ }
+ else if(en != 0 && en->GetAttributesHash() != attributesHash)
+ {
+ // Attributes have probably changed, upload them again.
+ // If the attributes have changed enough, the directory hash will have changed too,
+ // and so the dir will have been downloaded, and the entry will be available.
+
+ // Get connection
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Only do this step if there is room on the server.
+ // This step will be repeated later when there is space available
+ if(!rParams.mrContext.StorageLimitExceeded())
+ {
+ // Update store
+ BackupClientFileAttributes attr;
+ attr.ReadAttributes(filename.c_str(), false /* put mod times in the attributes, please */);
+ MemBlockStream attrStream(attr);
+ connection.QuerySetReplacementFileAttributes(mObjectID, attributesHash, storeFilename, attrStream);
+ }
+ }
+
+ if(modTime >= rParams.mSyncPeriodEnd)
+ {
+ // Allocate?
+ if(mpPendingEntries == 0)
+ {
+ mpPendingEntries = new std::map<std::string, box_time_t>;
+ }
+ // Adding to mPendingEntries list
+ if(pendingFirstSeenTime == 0)
+ {
+ // Haven't seen this before -- add to list!
+ (*mpPendingEntries)[*f] = modTime;
+ }
+ }
+
+ // Zero the matching pointer in rEntriesLeftOver, if this entry is present there
+ if(en != 0)
+ {
+ for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
+ {
+ if(rEntriesLeftOver[l] == en)
+ {
+ rEntriesLeftOver[l] = 0;
+ break;
+ }
+ }
+ }
+
+ // Does this file need an entry in the ID map?
+ if(fileSize >= rParams.mFileTrackingSizeThreshold)
+ {
+ // Get the map
+ BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
+
+ // Need to get an ID from somewhere...
+ if(latestObjectID != 0)
+ {
+ // Use this one
+ idMap.AddToMap(inodeNum, latestObjectID, mObjectID /* containing directory */);
+ }
+ else
+ {
+ // Don't know it -- haven't sent anything to the store, and didn't get a listing.
+ // Look it up in the current map, and if it's there, use that.
+ const BackupClientInodeToIDMap &currentIDMap(rParams.mrContext.GetCurrentIDMap());
+ int64_t objid = 0, dirid = 0;
+ if(currentIDMap.Lookup(inodeNum, objid, dirid))
+ {
+ // Found
+ ASSERT(dirid == mObjectID);
+ // NOTE: If the above assert fails, an inode number has been reused by the OS,
+ // or there is a problem somewhere. If this happened on a short test run, look
+ // into it. However, in a long running process this may happen occasionally and
+ // not indicate anything wrong.
+ // Run the release version for real life use, where this check is not made.
+ idMap.AddToMap(inodeNum, objid, mObjectID /* containing directory */);
+ }
+ }
+ }
+ }
+
+ // Erase contents of files to save space when recursing
+ rFiles.clear();
+
+ // Delete the pending entries, if the map is empty
+ if(mpPendingEntries != 0 && mpPendingEntries->size() == 0)
+ {
+ TRACE1("Deleting mpPendingEntries from dir ID %lld\n", mObjectID);
+ delete mpPendingEntries;
+ mpPendingEntries = 0;
+ }
+
+ // Do directories
+ for(std::vector<std::string>::const_iterator d = rDirs.begin();
+ d != rDirs.end(); ++d)
+ {
+ // Get the local filename
+ std::string dirname(rLocalPath + DIRECTORY_SEPARATOR + *d);
+
+ // See if it's in the listing (if we have one)
+ BackupStoreFilenameClear storeFilename(*d);
+ BackupStoreDirectory::Entry *en = 0;
+ if(pDirOnStore != 0)
+ {
+ DecryptedEntriesMap_t::iterator i(decryptedEntries.find(*d));
+ if(i != decryptedEntries.end())
+ {
+ en = i->second;
+ }
+ }
+
+ // Check that the entry which might have been found is in fact a directory
+ if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == 0))
+ {
+ // Entry exists, but is not a directory. Bad. Get rid of it.
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ connection.QueryDeleteFile(mObjectID /* in directory */, storeFilename);
+
+ // Nothing found
+ en = 0;
+ }
+
+ // Flag for having created directory, so can optimise the recursive call not to
+ // read it again, because we know it's empty.
+ bool haveJustCreatedDirOnServer = false;
+
+ // Next, see if it's in the list of sub directories
+ BackupClientDirectoryRecord *psubDirRecord = 0;
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(*d));
+ if(e != mSubDirectories.end())
+ {
+ // In the list, just use this pointer
+ psubDirRecord = e->second;
+ }
+ else if(!rParams.mrContext.StorageLimitExceeded()) // know we've got a connection if we get this far, as dir will have been modified.
+ {
+ // Note: only think about adding directory records if there's space left on the server.
+ // If there isn't, this step will be repeated when there is some available.
+
+ // Need to create the record. But do we need to create the directory on the server?
+ int64_t subDirObjectID = 0;
+ if(en != 0)
+ {
+ // No. Exists on the server, and we know about it from the listing.
+ subDirObjectID = en->GetObjectID();
+ }
+ else
+ {
+ // Yes, creation required!
+ // It is known that it doesn't exist:
+ // if pDirOnStore == 0, then the directory has had an initial sync, and hasn't been modified.
+ // so it has definitely been created already.
+ // if en == 0 but pDirOnStore != 0, well... obviously it doesn't exist.
+
+ // Get attributes
+ box_time_t attrModTime = 0;
+ ino_t inodeNum = 0;
+ BackupClientFileAttributes attr;
+ attr.ReadAttributes(dirname.c_str(), true /* directories have zero mod times */,
+ 0 /* not interested in mod time */, &attrModTime, 0 /* not file size */,
+ &inodeNum);
+
+ // Check to see if the directory has been renamed
+ // First, do we have a record in the ID map?
+ int64_t renameObjectID = 0, renameInDirectory = 0;
+ bool renameDir = false;
+ const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
+ if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
+ {
+ // Look up on the server to get the name, to build the local filename
+ std::string localPotentialOldName;
+ bool isDir = false;
+ bool isCurrentVersion = false;
+ if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion))
+ {
+ // Only interested if it's a directory
+ if(isDir && isCurrentVersion)
+ {
+ // Check that the object doesn't exist already
+ struct stat st;
+ if(::stat(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
+ {
+ // Doesn't exist locally, but does exist on the server.
+ // Therefore we can safely rename it.
+ renameDir = true;
+ }
+ }
+ }
+ }
+
+ // Get connection
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Don't do a check for storage limit exceeded here, because if we get to this
+ // stage, a connection will have been opened, and the status known, so the check
+ // in the else if(...) above will be correct.
+
+ // Build attribute stream for sending
+ MemBlockStream attrStream(attr);
+
+ if(renameDir)
+ {
+ // Rename the existing directory on the server
+ connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
+ BackupProtocolClientMoveObject::Flags_MoveAllWithSameName | BackupProtocolClientMoveObject::Flags_AllowMoveOverDeletedObject,
+ storeFilename);
+
+ // Put the latest attributes on it
+ connection.QueryChangeDirAttributes(renameObjectID, attrModTime, attrStream);
+
+ // Stop it being deleted later
+ BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ rdelList.StopDirectoryDeletion(renameObjectID);
+
+ // This is the ID for the renamed directory
+ subDirObjectID = renameObjectID;
+ }
+ else
+ {
+ // Create a new directory
+ std::auto_ptr<BackupProtocolClientSuccess> dirCreate(connection.QueryCreateDirectory(
+ mObjectID, attrModTime, storeFilename, attrStream));
+ subDirObjectID = dirCreate->GetObjectID();
+
+ // Flag as having done this for optimisation later
+ haveJustCreatedDirOnServer = true;
+ }
+ }
+
+			// Create a new record object for this directory
+ psubDirRecord = new BackupClientDirectoryRecord(subDirObjectID, *d);
+
+ // Store in list
+ try
+ {
+ mSubDirectories[*d] = psubDirRecord;
+ }
+ catch(...)
+ {
+ delete psubDirRecord;
+ psubDirRecord = 0;
+ throw;
+ }
+ }
+
+ ASSERT(psubDirRecord != 0 || rParams.mrContext.StorageLimitExceeded());
+
+ if(psubDirRecord)
+ {
+ // Sync this sub directory too
+ psubDirRecord->SyncDirectory(rParams, mObjectID, dirname, haveJustCreatedDirOnServer);
+ }
+
+ // Zero pointer in rEntriesLeftOver, if we have a pointer to zero
+ if(en != 0)
+ {
+ for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
+ {
+ if(rEntriesLeftOver[l] == en)
+ {
+ rEntriesLeftOver[l] = 0;
+ break;
+ }
+ }
+ }
+ }
+
+ // Delete everything which is on the store, but not on disc
+ for(unsigned int l = 0; l < rEntriesLeftOver.size(); ++l)
+ {
+ if(rEntriesLeftOver[l] != 0)
+ {
+ BackupStoreDirectory::Entry *en = rEntriesLeftOver[l];
+
+ // These entries can't be deleted immediately, as it would prevent
+ // renaming and moving of objects working properly. So we add them
+ // to a list, which is actually deleted at the very end of the session.
+ // If there's an error during the process, it doesn't matter if things
+ // aren't actually deleted, as the whole state will be reset anyway.
+ BackupClientDeleteList &rdel(rParams.mrContext.GetDeleteList());
+
+ // Delete this entry -- file or directory?
+ if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
+ {
+ // Set a pending deletion for the file
+ rdel.AddFileDelete(mObjectID, en->GetName());
+ }
+ else if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) != 0)
+ {
+ // Set as a pending deletion for the directory
+ rdel.AddDirectoryDelete(en->GetObjectID());
+
+ // If there's a directory record for it in the sub directory map, delete it now
+ BackupStoreFilenameClear dirname(en->GetName());
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(dirname.GetClearFilename()));
+ if(e != mSubDirectories.end())
+ {
+ // Carefully delete the entry from the map
+ BackupClientDirectoryRecord *rec = e->second;
+ mSubDirectories.erase(e);
+ delete rec;
+ TRACE2("Deleted directory record for %s/%s\n", rLocalPath.c_str(), dirname.GetClearFilename().c_str());
+ }
+ }
+ }
+ }
+
+ // Return success flag (will be false if some files failed)
+ return allUpdatedSuccessfully;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &, BackupStoreDirectory *, int64_t, const std::string &)
+// Purpose: Called to resolve difficulties when a directory is found on the
+// store where a file is to be uploaded.
+// Created: 9/7/04
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename)
+{
+ // First, delete the directory
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ connection.QueryDeleteDirectory(ObjectID);
+
+ // Then, delete any directory record
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(rFilename));
+ if(e != mSubDirectories.end())
+ {
+ // A record exists for this, remove it
+ BackupClientDirectoryRecord *psubDirRecord = e->second;
+ mSubDirectories.erase(e);
+
+ // And delete the object
+ delete psubDirRecord;
+ }
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &, const std::string &, const BackupStoreFilename &, int64_t, box_time_t, box_time_t, bool)
+// Purpose: Private. Upload a file to the server -- may send a patch instead of the whole thing
+// Created: 20/1/04
+//
+// --------------------------------------------------------------------------
+int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
+ int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer)
+{
+ // Get the connection
+ BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+
+ // Info
+ int64_t objID = 0;
+ bool doNormalUpload = true;
+
+ // Use a try block to catch store full errors
+ try
+ {
+ // Might an old version be on the server, and is the file size over the diffing threshold?
+ if(!NoPreviousVersionOnServer && FileSize >= rParams.mDiffingUploadSizeThreshold)
+ {
+ // YES -- try to do diff, if possible
+ // First, query the server to see if there's an old version available
+ std::auto_ptr<BackupProtocolClientSuccess> getBlockIndex(connection.QueryGetBlockIndexByName(mObjectID, rStoreFilename));
+ int64_t diffFromID = getBlockIndex->GetObjectID();
+
+ if(diffFromID != 0)
+ {
+ // Found an old version -- get the index
+ std::auto_ptr<IOStream> blockIndexStream(connection.ReceiveStream());
+
+ // Diff the file
+ bool isCompletelyDifferent = false;
+ std::auto_ptr<IOStream> patchStream(BackupStoreFile::EncodeFileDiff(rFilename.c_str(),
+ mObjectID, /* containing directory */
+ rStoreFilename, diffFromID, *blockIndexStream,
+ connection.GetTimeout(), 0 /* not interested in the modification time */, &isCompletelyDifferent));
+
+ // Upload the patch to the store
+ std::auto_ptr<BackupProtocolClientSuccess> stored(connection.QueryStoreFile(mObjectID, ModificationTime,
+ AttributesHash, isCompletelyDifferent?(0):(diffFromID), rStoreFilename, *patchStream));
+
+ // Don't attempt to upload it again!
+ doNormalUpload = false;
+ }
+ }
+
+ if(doNormalUpload)
+ {
+ // below threshold or nothing to diff from, so upload whole
+
+ // Prepare to upload, getting a stream which will encode the file as we go along
+ std::auto_ptr<IOStream> upload(BackupStoreFile::EncodeFile(rFilename.c_str(), mObjectID, rStoreFilename));
+
+ // Send to store
+ std::auto_ptr<BackupProtocolClientSuccess> stored(connection.QueryStoreFile(mObjectID, ModificationTime,
+ AttributesHash, 0 /* no diff from file ID */, rStoreFilename, *upload));
+
+ // Get object ID from the result
+ objID = stored->GetObjectID();
+ }
+ }
+ catch(BoxException &e)
+ {
+ if(e.GetType() == ConnectionException::ExceptionType && e.GetSubType() == ConnectionException::Protocol_UnexpectedReply)
+ {
+ // Check and see what error the protocol has -- as it might be an error...
+ int type, subtype;
+ if(connection.GetLastError(type, subtype)
+ && type == BackupProtocolClientError::ErrorType
+ && subtype == BackupProtocolClientError::Err_StorageLimitExceeded)
+ {
+ // The hard limit was exceeded on the server, notify!
+ rParams.mrDaemon.NotifySysadmin(BackupDaemon::NotifyEvent_StoreFull);
+ }
+ }
+
+		// Send the error on its way
+ throw;
+ }
+
+ // Return the new object ID of this file
+ return objID;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(SyncParams &, const char *)
+// Purpose: Sets the error state when there were problems reading an object
+// from the filesystem.
+// Created: 29/3/04
+//
+// --------------------------------------------------------------------------
+void BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(BackupClientDirectoryRecord::SyncParams &rParams, const char *Filename)
+{
+ // Zero hash, so it gets synced properly next time round.
+ ::memset(mStateChecksum, 0, sizeof(mStateChecksum));
+
+ // Log the error
+ ::syslog(LOG_ERR, "Backup object failed, error when reading %s", Filename);
+
+	// Mark that an error occurred in the parameters object
+ rParams.mReadErrorsOnFilesystemObjects = true;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::SyncParams::SyncParams(BackupClientContext &)
+// Purpose: Constructor
+// Created: 8/3/04
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::SyncParams::SyncParams(BackupDaemon &rDaemon, BackupClientContext &rContext)
+ : mSyncPeriodStart(0),
+ mSyncPeriodEnd(0),
+ mMaxUploadWait(0),
+ mMaxFileTimeInFuture(99999999999999999LL),
+ mFileTrackingSizeThreshold(16*1024),
+ mDiffingUploadSizeThreshold(16*1024),
+ mrDaemon(rDaemon),
+ mrContext(rContext),
+ mReadErrorsOnFilesystemObjects(false),
+ mUploadAfterThisTimeInTheFuture(99999999999999999LL),
+ mHaveLoggedWarningAboutFutureFileTimes(false)
+{
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientDirectoryRecord::SyncParams::~SyncParams()
+// Purpose: Destructor
+// Created: 8/3/04
+//
+// --------------------------------------------------------------------------
+BackupClientDirectoryRecord::SyncParams::~SyncParams()
+{
+}
+
+
+
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.h b/bin/bbackupd/BackupClientDirectoryRecord.h
new file mode 100755
index 00000000..99354bc8
--- /dev/null
+++ b/bin/bbackupd/BackupClientDirectoryRecord.h
@@ -0,0 +1,115 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientDirectoryRecord.h
+// Purpose: Implementation of record about directory for backup client
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTDIRECTORYRECORD__H
+#define BACKUPCLIENTDIRECTORYRECORD__H
+
+#include <string>
+#include <map>
+
+#include "BoxTime.h"
+#include "BackupClientFileAttributes.h"
+#include "BackupStoreDirectory.h"
+#include "MD5Digest.h"
+
+class BackupClientContext;
+class BackupDaemon;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupClientDirectoryRecord
+// Purpose: Implementation of record about directory for backup client
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+class BackupClientDirectoryRecord
+{
+public:
+ BackupClientDirectoryRecord(int64_t ObjectID, const std::string &rSubDirName);
+ ~BackupClientDirectoryRecord();
+private:
+ BackupClientDirectoryRecord(const BackupClientDirectoryRecord &);
+public:
+
+ enum
+ {
+ UnknownDirectoryID = 0
+ };
+
+ // --------------------------------------------------------------------------
+ //
+ // Class
+ // Name: BackupClientDirectoryRecord::SyncParams
+ // Purpose: Holds parameters etc for directory syncing. Not passed as
+ // const, some parameters may be modified during sync.
+ // Created: 8/3/04
+ //
+ // --------------------------------------------------------------------------
+ class SyncParams
+ {
+ public:
+ SyncParams(BackupDaemon &rDaemon, BackupClientContext &rContext);
+ ~SyncParams();
+ private:
+ // No copying
+ SyncParams(const SyncParams&);
+ SyncParams &operator=(const SyncParams&);
+ public:
+
+ // Data members are public, as accessors are not justified here
+ box_time_t mSyncPeriodStart;
+ box_time_t mSyncPeriodEnd;
+ box_time_t mMaxUploadWait;
+ box_time_t mMaxFileTimeInFuture;
+ int32_t mFileTrackingSizeThreshold;
+ int32_t mDiffingUploadSizeThreshold;
+ BackupDaemon &mrDaemon;
+ BackupClientContext &mrContext;
+ bool mReadErrorsOnFilesystemObjects;
+
+ // Member variables modified by syncing process
+ box_time_t mUploadAfterThisTimeInTheFuture;
+ bool mHaveLoggedWarningAboutFutureFileTimes;
+ };
+
+ void SyncDirectory(SyncParams &rParams, int64_t ContainingDirectoryID, const std::string &rLocalPath,
+ bool ThisDirHasJustBeenCreated = false);
+
+private:
+ void DeleteSubDirectories();
+ BackupStoreDirectory *FetchDirectoryListing(SyncParams &rParams);
+ void UpdateAttributes(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, const std::string &rLocalPath);
+ bool UpdateItems(SyncParams &rParams, const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+ std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
+ std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs);
+ int64_t UploadFile(SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
+ int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer);
+ void SetErrorWhenReadingFilesystemObject(SyncParams &rParams, const char *Filename);
+ void RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename);
+
+private:
+ int64_t mObjectID;
+ std::string mSubDirName;
+ bool mInitialSyncDone;
+ bool mSyncDone;
+
+ // Checksum of directory contents and attributes, used to detect changes
+ uint8_t mStateChecksum[MD5Digest::DigestLength];
+
+ std::map<std::string, box_time_t> *mpPendingEntries;
+ std::map<std::string, BackupClientDirectoryRecord *> mSubDirectories;
+	// mpPendingEntries is a pointer rather than simply a member
+	// variable, because most of the time it'll be empty. This would waste a lot
+	// of memory because of STL allocation policies.
+};
+
+#endif // BACKUPCLIENTDIRECTORYRECORD__H
+
+
diff --git a/bin/bbackupd/BackupClientInodeToIDMap.cpp b/bin/bbackupd/BackupClientInodeToIDMap.cpp
new file mode 100755
index 00000000..23e91eba
--- /dev/null
+++ b/bin/bbackupd/BackupClientInodeToIDMap.cpp
@@ -0,0 +1,279 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientInodeToIDMap.cpp
+// Purpose: Map of inode numbers to file IDs on the store
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#ifndef PLATFORM_BERKELEY_DB_NOT_SUPPORTED
+ // Include db headers and other OS files if they're needed for the disc implementation
+ #include <sys/types.h>
+ #include <fcntl.h>
+ #include <limits.h>
+ #ifdef PLATFORM_LINUX
+ #include "../../local/_linux_db.h"
+ #else
+ #include <db.h>
+ #endif
+ #include <sys/stat.h>
+#endif
+
+#define BACKIPCLIENTINODETOIDMAP_IMPLEMENTATION
+#include "BackupClientInodeToIDMap.h"
+
+#include "BackupStoreException.h"
+
+
+#include "MemLeakFindOn.h"
+
+// What type of Berkeley DB shall we use?
+#define TABLE_DATABASE_TYPE DB_HASH
+
+typedef struct
+{
+ int64_t mObjectID;
+ int64_t mInDirectory;
+} IDBRecord;
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientInodeToIDMap::BackupClientInodeToIDMap()
+// Purpose: Constructor
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientInodeToIDMap::BackupClientInodeToIDMap()
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ : mReadOnly(true),
+ mEmpty(false),
+ dbp(0)
+#endif
+{
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientInodeToIDMap::~BackupClientInodeToIDMap()
+// Purpose: Destructor
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupClientInodeToIDMap::~BackupClientInodeToIDMap()
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ if(dbp != 0)
+ {
+ dbp->close(dbp);
+ }
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientInodeToIDMap::Open(const char *, bool, bool)
+// Purpose: Open the database map, creating a file on disc to store everything
+// Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::Open(const char *Filename, bool ReadOnly, bool CreateNew)
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ // Correct arguments?
+ ASSERT(!(CreateNew && ReadOnly));
+
+ // Correct usage?
+ ASSERT(dbp == 0);
+ ASSERT(!mEmpty);
+
+ // Open the database file
+ dbp = dbopen(Filename, (CreateNew?O_CREAT:0) | (ReadOnly?O_RDONLY:O_RDWR), S_IRUSR | S_IWUSR | S_IRGRP, TABLE_DATABASE_TYPE, NULL);
+ if(dbp == NULL)
+ {
+ THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+ }
+
+ // Read only flag
+ mReadOnly = ReadOnly;
+#endif
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientInodeToIDMap::OpenEmpty()
+// Purpose: 'Open' this map. Not associated with a disc file. Useful for when a map
+// is required, but is against an empty file on disc which shouldn't be created.
+// Implies read only.
+// Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::OpenEmpty()
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ ASSERT(dbp == 0);
+ mEmpty = true;
+ mReadOnly = true;
+#endif
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientInodeToIDMap::Close()
+// Purpose: Close the database file
+// Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::Close()
+{
+#ifndef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ if(dbp != 0)
+ {
+ if(dbp->close(dbp) != 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+ }
+ dbp = 0;
+ }
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientInodeToIDMap::AddToMap(InodeRefType, int64_t, int64_t)
+// Purpose: Adds an entry to the map. Overwrites any existing entry.
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupClientInodeToIDMap::AddToMap(InodeRefType InodeRef, int64_t ObjectID, int64_t InDirectory)
+{
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ mMap[InodeRef] = std::pair<int64_t, int64_t>(ObjectID, InDirectory);
+#else
+ if(mReadOnly)
+ {
+ THROW_EXCEPTION(BackupStoreException, InodeMapIsReadOnly);
+ }
+
+ if(dbp == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, InodeMapNotOpen);
+ }
+
+ // Setup structures
+ IDBRecord rec;
+ rec.mObjectID = ObjectID;
+ rec.mInDirectory = InDirectory;
+
+ DBT key;
+ key.data = &InodeRef;
+ key.size = sizeof(InodeRef);
+
+ DBT data;
+ data.data = &rec;
+ data.size = sizeof(rec);
+
+ // Add to map (or replace existing entry)
+ if(dbp->put(dbp, &key, &data, 0) != 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+ }
+#endif
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupClientInodeToIDMap::Lookup(InodeRefType, int64_t &, int64_t &) const
+// Purpose: Looks up an inode in the map, returning true if it exists, and the object
+//			 IDs of it and the directory it's in, via the reference arguments.
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+bool BackupClientInodeToIDMap::Lookup(InodeRefType InodeRef, int64_t &rObjectIDOut, int64_t &rInDirectoryOut) const
+{
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ std::map<InodeRefType, std::pair<int64_t, int64_t> >::const_iterator i(mMap.find(InodeRef));
+
+ // Found?
+ if(i == mMap.end())
+ {
+ return false;
+ }
+
+ // Yes. Return the details
+ rObjectIDOut = i->second.first;
+ rInDirectoryOut = i->second.second;
+ return true;
+#else
+ if(mEmpty)
+ {
+ // Map is empty
+ return false;
+ }
+
+ if(dbp == 0)
+ {
+ THROW_EXCEPTION(BackupStoreException, InodeMapNotOpen);
+ }
+
+ DBT key;
+ key.data = &InodeRef;
+ key.size = sizeof(InodeRef);
+
+ DBT data;
+ data.data = 0;
+ data.size = 0;
+
+ switch(dbp->get(dbp, &key, &data, 0))
+ {
+ case 1: // key not in file
+ return false;
+
+ case -1: // error
+ default: // not specified in docs
+ THROW_EXCEPTION(BackupStoreException, BerkelyDBFailure);
+ return false;
+
+ case 0: // success, found it
+ break;
+ }
+
+ // Check for sensible return
+ if(key.data == 0 || data.size != sizeof(IDBRecord))
+ {
+ // Assert in debug version
+ ASSERT(key.data == 0 || data.size != sizeof(IDBRecord));
+
+ // Invalid entries mean it wasn't found
+ return false;
+ }
+
+	// Data alignment isn't guaranteed to be on a suitable boundary
+ IDBRecord rec;
+ ::memcpy(&rec, data.data, sizeof(rec));
+
+ // Return data
+ rObjectIDOut = rec.mObjectID;
+ rInDirectoryOut = rec.mInDirectory;
+
+ // Don't have to worry about freeing the returned data
+
+ // Found
+ return true;
+#endif
+}
+
+
diff --git a/bin/bbackupd/BackupClientInodeToIDMap.h b/bin/bbackupd/BackupClientInodeToIDMap.h
new file mode 100755
index 00000000..1ea7755d
--- /dev/null
+++ b/bin/bbackupd/BackupClientInodeToIDMap.h
@@ -0,0 +1,67 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupClientInodeToIDMap.h
+// Purpose: Map of inode numbers to file IDs on the store
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPCLIENTINODETOIDMAP__H
+#define BACKUPCLIENTINODETOIDMAP__H
+
+#include <sys/types.h>
+
+#include <map>
+#include <utility>
+
+// Use in memory implementation if there isn't access to the Berkely DB on this platform
+#ifdef PLATFORM_BERKELEY_DB_NOT_SUPPORTED
+ #define BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+#endif
+
+typedef ino_t InodeRefType;
+
+// avoid having to include the DB files when not necessary
+#ifndef BACKIPCLIENTINODETOIDMAP_IMPLEMENTATION
+ class DB;
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupClientInodeToIDMap
+// Purpose: Map of inode numbers to file IDs on the store
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+class BackupClientInodeToIDMap
+{
+public:
+ BackupClientInodeToIDMap();
+ ~BackupClientInodeToIDMap();
+private:
+ BackupClientInodeToIDMap(const BackupClientInodeToIDMap &rToCopy); // not allowed
+public:
+
+ void Open(const char *Filename, bool ReadOnly, bool CreateNew);
+ void OpenEmpty();
+
+ void AddToMap(InodeRefType InodeRef, int64_t ObjectID, int64_t InDirectory);
+ bool Lookup(InodeRefType InodeRef, int64_t &rObjectIDOut, int64_t &rInDirectoryOut) const;
+
+ void Close();
+
+private:
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+ std::map<InodeRefType, std::pair<int64_t, int64_t> > mMap;
+#else
+ bool mReadOnly;
+ bool mEmpty;
+ DB *dbp; // C style interface, use notation from documentation
+#endif
+};
+
+#endif // BACKUPCLIENTINODETOIDMAP__H
+
+
diff --git a/bin/bbackupd/BackupDaemon.cpp b/bin/bbackupd/BackupDaemon.cpp
new file mode 100755
index 00000000..7aa21a87
--- /dev/null
+++ b/bin/bbackupd/BackupDaemon.cpp
@@ -0,0 +1,1624 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupDaemon.cpp
+// Purpose: Backup daemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+
+#include <unistd.h>
+#include <syslog.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <signal.h>
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+ #include <mntent.h>
+#endif
+#include <sys/wait.h>
+
+#include "Configuration.h"
+#include "IOStream.h"
+#include "MemBlockStream.h"
+#include "CommonException.h"
+
+#include "SSLLib.h"
+#include "TLSContext.h"
+
+#include "BackupDaemon.h"
+#include "BackupDaemonConfigVerify.h"
+#include "BackupClientContext.h"
+#include "BackupClientDirectoryRecord.h"
+#include "BackupStoreDirectory.h"
+#include "BackupClientFileAttributes.h"
+#include "BackupStoreFilenameClear.h"
+#include "BackupClientInodeToIDMap.h"
+#include "autogen_BackupProtocolClient.h"
+#include "BackupClientCryptoKeys.h"
+#include "BannerText.h"
+#include "BackupStoreFile.h"
+#include "Random.h"
+#include "ExcludeList.h"
+#include "BackupClientMakeExcludeList.h"
+#include "IOStreamGetLine.h"
+#include "Utils.h"
+#include "FileStream.h"
+#include "BackupStoreException.h"
+#include "BackupStoreConstants.h"
+#include "LocalProcessStream.h"
+#include "IOStreamGetLine.h"
+#include "Conversion.h"
+
+#include "MemLeakFindOn.h"
+
+#define MAX_SLEEP_TIME ((unsigned int)1024)
+
+// Make the actual sync period have a little bit of extra time, up to a 64th of the main sync period.
+// This prevents repetitive cycles of load on the server
+#define SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY 6
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::BackupDaemon()
+// Purpose: constructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupDaemon::BackupDaemon()
+ : mState(BackupDaemon::State_Initialising),
+ mpCommandSocketInfo(0),
+ mDeleteUnusedRootDirEntriesAfter(0)
+{
+ // Only ever one instance of a daemon
+ SSLLib::Initialise();
+
+	// Initialise notification sent status
+ for(int l = 0; l <= NotifyEvent__MAX; ++l)
+ {
+ mNotificationsSent[l] = false;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::~BackupDaemon()
+// Purpose: Destructor
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+BackupDaemon::~BackupDaemon()
+{
+ DeleteAllLocations();
+ DeleteAllIDMaps();
+
+ if(mpCommandSocketInfo != 0)
+ {
+ delete mpCommandSocketInfo;
+ mpCommandSocketInfo = 0;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::DaemonName()
+// Purpose: Get name of daemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+const char *BackupDaemon::DaemonName() const
+{
+ return "bbackupd";
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::DaemonBanner()
+// Purpose: Daemon banner
+// Created: 1/1/04
+//
+// --------------------------------------------------------------------------
+const char *BackupDaemon::DaemonBanner() const
+{
+#ifndef NDEBUG
+ // Don't display banner in debug builds
+ return 0;
+#else
+ return BANNER_TEXT("Backup Client");
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::GetConfigVerify()
+// Purpose: Get configuration specification
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+const ConfigurationVerify *BackupDaemon::GetConfigVerify() const
+{
+ // Defined elsewhere
+ return &BackupDaemonConfigVerify;
+}
+
+#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::SetupInInitialProcess()
+//		Purpose: Platforms with non-checkable credentials on local sockets only.
+// Prints a warning if the command socket is used.
+// Created: 25/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetupInInitialProcess()
+{
+ // Print a warning on this platform if the CommandSocket is used.
+ if(GetConfiguration().KeyExists("CommandSocket"))
+ {
+ printf(
+ "============================================================================================\n" \
+ "SECURITY WARNING: This platform cannot check the credentials of connections to the\n" \
+ "command socket. This is a potential DoS security problem.\n" \
+ "Remove the CommandSocket directive from the bbackupd.conf file if bbackupctl is not used.\n" \
+ "============================================================================================\n"
+ );
+ }
+}
+#endif
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::DeleteAllLocations()
+// Purpose: Deletes all records stored
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteAllLocations()
+{
+ // Run through, and delete everything
+ for(std::vector<Location *>::iterator i = mLocations.begin();
+ i != mLocations.end(); ++i)
+ {
+ delete *i;
+ }
+
+ // Clear the contents of the map, so it is empty
+ mLocations.clear();
+
+	// And delete everything from the associated mount vector
+ mIDMapMounts.clear();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::Run()
+// Purpose: Run function for daemon
+// Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::Run()
+{
+ // Ignore SIGPIPE (so that if a command connection is broken, the daemon doesn't terminate)
+ ::signal(SIGPIPE, SIG_IGN);
+
+ // Create a command socket?
+ const Configuration &conf(GetConfiguration());
+ if(conf.KeyExists("CommandSocket"))
+ {
+ // Yes, create a local UNIX socket
+ mpCommandSocketInfo = new CommandSocketInfo;
+ const char *socketName = conf.GetKeyValue("CommandSocket").c_str();
+ ::unlink(socketName);
+ mpCommandSocketInfo->mListeningSocket.Listen(Socket::TypeUNIX, socketName);
+ }
+
+ // Handle things nicely on exceptions
+ try
+ {
+ Run2();
+ }
+ catch(...)
+ {
+ if(mpCommandSocketInfo != 0)
+ {
+ delete mpCommandSocketInfo;
+ mpCommandSocketInfo = 0;
+ }
+
+ throw;
+ }
+
+ // Clean up
+ if(mpCommandSocketInfo != 0)
+ {
+ delete mpCommandSocketInfo;
+ mpCommandSocketInfo = 0;
+ }
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::Run2()
+//		Purpose: Run function for daemon (second stage). Loops until
+//		         StopRun() returns true: waits until the next sync is due
+//		         (or one is requested over the command socket), performs
+//		         the sync against the store, and recovers from errors by
+//		         resetting local state and retrying.
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::Run2()
+{
+	// Read in the certificates creating a TLS context
+	TLSContext tlsContext;
+	const Configuration &conf(GetConfiguration());
+	std::string certFile(conf.GetKeyValue("CertificateFile"));
+	std::string keyFile(conf.GetKeyValue("PrivateKeyFile"));
+	std::string caFile(conf.GetKeyValue("TrustedCAsFile"));
+	tlsContext.Initialise(false /* as client */, certFile.c_str(), keyFile.c_str(), caFile.c_str());
+
+	// Set up the keys for various things
+	BackupClientCryptoKeys_Setup(conf.GetKeyValue("KeysFile").c_str());
+
+	// Set maximum diffing time?
+	if(conf.KeyExists("MaximumDiffingTime"))
+	{
+		BackupStoreFile::SetMaximumDiffingTime(conf.GetKeyValueInt("MaximumDiffingTime"));
+	}
+
+	// Setup various timings
+
+	// How often to connect to the store (approximate)
+	box_time_t updateStoreInterval = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("UpdateStoreInterval"));
+
+	// But are we connecting automatically?
+	bool automaticBackup = conf.GetKeyValueBool("AutomaticBackup");
+
+	// The minimum age a file needs to be before it will be considered for uploading
+	box_time_t minimumFileAge = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("MinimumFileAge"));
+
+	// The maximum time we'll wait to upload a file, regardless of how often it's modified
+	box_time_t maxUploadWait = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("MaxUploadWait"));
+	// Adjust by subtracting the minimum file age, so is relative to sync period end in comparisons
+	maxUploadWait = (maxUploadWait > minimumFileAge)?(maxUploadWait - minimumFileAge):(0);
+
+	// When the next sync should take place -- which is ASAP
+	box_time_t nextSyncTime = 0;
+
+	// When the last sync started (only updated if the store was not full when the sync ended)
+	box_time_t lastSyncTime = 0;
+
+	// --------------------------------------------------------------------------------------------
+
+	// And what's the current client store marker?
+	int64_t clientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;	// haven't contacted the store yet
+
+	// Set state
+	SetState(State_Idle);
+
+	// Loop around doing backups
+	do
+	{
+		// Flags used below
+		bool storageLimitExceeded = false;
+		bool doSync = false;
+		bool doSyncForcedByCommand = false;
+
+		// Is a delay necessary?
+		{
+			box_time_t currentTime;
+			do
+			{
+				// Need to check the stop run thing here too, so this loop isn't run if we should be stopping
+				if(StopRun()) break;
+
+				currentTime = GetCurrentBoxTime();
+
+				// Pause a while, but no more than MAX_SLEEP_TIME seconds (use the conditional because times are unsigned)
+				box_time_t requiredDelay = (nextSyncTime < currentTime)?(0):(nextSyncTime - currentTime);
+				// If there isn't automatic backup happening, set a long delay. And limit delays at the same time.
+				if(!automaticBackup || requiredDelay > SecondsToBoxTime((uint32_t)MAX_SLEEP_TIME)) requiredDelay = SecondsToBoxTime((uint32_t)MAX_SLEEP_TIME);
+
+				// Only do the delay if there is a delay required
+				if(requiredDelay > 0)
+				{
+					// Sleep somehow. There are choices on how this should be done, depending on the state of the control connection
+					if(mpCommandSocketInfo != 0)
+					{
+						// A command socket exists, so sleep by handling connections with it
+						WaitOnCommandSocket(requiredDelay, doSync, doSyncForcedByCommand);
+					}
+					else
+					{
+						// No command socket or connection, just do a normal sleep
+						int sleepSeconds = BoxTimeToSeconds(requiredDelay);
+						::sleep((sleepSeconds <= 0)?1:sleepSeconds);
+					}
+				}
+
+			} while((!automaticBackup || (currentTime < nextSyncTime)) && !doSync && !StopRun());
+		}
+
+		// Time of sync start, and if it's time for another sync (and we're doing automatic syncs), set the flag
+		box_time_t currentSyncStartTime = GetCurrentBoxTime();
+		if(automaticBackup && currentSyncStartTime >= nextSyncTime)
+		{
+			doSync = true;
+		}
+
+		// Use a script to see if sync is allowed now?
+		if(!doSyncForcedByCommand && doSync && !StopRun())
+		{
+			int d = UseScriptToSeeIfSyncAllowed();
+			if(d > 0)
+			{
+				// Script has asked for a delay
+				nextSyncTime = GetCurrentBoxTime() + SecondsToBoxTime((uint32_t)d);
+				doSync = false;
+			}
+		}
+
+		// Ready to sync? (but only if we're not supposed to be stopping)
+		if(doSync && !StopRun())
+		{
+			// Touch a file to record times in filesystem
+			TouchFileInWorkingDir("last_sync_start");
+
+			// Tell anything connected to the command socket
+			SendSyncStartOrFinish(true /* start */);
+
+			// Reset statistics on uploads
+			BackupStoreFile::ResetStats();
+
+			// Calculate the sync period of files to examine
+			box_time_t syncPeriodStart = lastSyncTime;
+			box_time_t syncPeriodEnd = currentSyncStartTime - minimumFileAge;
+			// Check logic
+			ASSERT(syncPeriodEnd > syncPeriodStart);
+			// Paranoid check on sync times
+			if(syncPeriodStart >= syncPeriodEnd) continue;
+
+			// Adjust syncPeriodEnd to emulate snapshot behaviour properly
+			box_time_t syncPeriodEndExtended = syncPeriodEnd;
+			// Using zero min file age?
+			if(minimumFileAge == 0)
+			{
+				// Add a year on to the end of the end time, to make sure we sync
+				// files which are modified after the scan run started.
+				// Of course, they may be eligable to be synced again the next time round,
+				// but this should be OK, because the changes only upload should upload no data.
+				// (365 days -- this previously read 356, a typo which silently
+				// shortened the window by nine days)
+				syncPeriodEndExtended += SecondsToBoxTime((uint32_t)(365*24*3600));
+			}
+
+			// Do sync
+			bool errorOccurred = false;
+			int errorCode = 0, errorSubCode = 0;
+			try
+			{
+				// Set state and log start
+				SetState(State_Connected);
+				::syslog(LOG_INFO, "Beginning scan of local files");
+
+				// Then create a client context object (don't just connect, as this may be unnecessary)
+				BackupClientContext clientContext(*this, tlsContext, conf.GetKeyValue("StoreHostname"),
+					conf.GetKeyValueInt("AccountNumber"), conf.GetKeyValueBool("ExtendedLogging"));
+
+				// Set up the sync parameters
+				BackupClientDirectoryRecord::SyncParams params(*this, clientContext);
+				params.mSyncPeriodStart = syncPeriodStart;
+				params.mSyncPeriodEnd = syncPeriodEndExtended;	// use potentially extended end time
+				params.mMaxUploadWait = maxUploadWait;
+				params.mFileTrackingSizeThreshold = conf.GetKeyValueInt("FileTrackingSizeThreshold");
+				params.mDiffingUploadSizeThreshold = conf.GetKeyValueInt("DiffingUploadSizeThreshold");
+				params.mMaxFileTimeInFuture = SecondsToBoxTime((uint32_t)conf.GetKeyValueInt("MaxFileTimeInFuture"));
+
+				// Set store marker
+				clientContext.SetClientStoreMarker(clientStoreMarker);
+
+				// Set up the locations, if necessary -- need to do it here so we have a (potential) connection to use
+				if(mLocations.empty())
+				{
+					const Configuration &locations(conf.GetSubConfiguration("BackupLocations"));
+
+					// Make sure all the directory records are set up
+					SetupLocations(clientContext, locations);
+				}
+
+				// Get some ID maps going
+				SetupIDMapsForSync();
+
+				// Delete any unused directories?
+				DeleteUnusedRootDirEntries(clientContext);
+
+				// Go through the records, syncing them
+				for(std::vector<Location *>::const_iterator i(mLocations.begin()); i != mLocations.end(); ++i)
+				{
+					// Set current and new ID map pointers in the context
+					clientContext.SetIDMaps(mCurrentIDMaps[(*i)->mIDMapIndex], mNewIDMaps[(*i)->mIDMapIndex]);
+
+					// Set exclude lists (context doesn't take ownership)
+					clientContext.SetExcludeLists((*i)->mpExcludeFiles, (*i)->mpExcludeDirs);
+
+					// Sync the directory
+					(*i)->mpDirectoryRecord->SyncDirectory(params, BackupProtocolClientListDirectory::RootDirectory, (*i)->mPath);
+
+					// Unset exclude lists (just in case)
+					clientContext.SetExcludeLists(0, 0);
+				}
+
+				// Errors reading any files?
+				if(params.mReadErrorsOnFilesystemObjects)
+				{
+					// Notify administrator
+					NotifySysadmin(NotifyEvent_ReadError);
+				}
+				else
+				{
+					// Unset the read error flag, so the error is
+					// reported again in the future
+					mNotificationsSent[NotifyEvent_ReadError] = false;
+				}
+
+				// Perform any deletions required -- these are delayed until the end
+				// to allow renaming to happen neatly.
+				clientContext.PerformDeletions();
+
+				// Close any open connection
+				clientContext.CloseAnyOpenConnection();
+
+				// Get the new store marker
+				clientStoreMarker = clientContext.GetClientStoreMarker();
+
+				// Check the storage limit
+				if(clientContext.StorageLimitExceeded())
+				{
+					// Record it for the state set at the bottom of the loop.
+					// (This flag was previously never set, so the daemon could
+					// never enter State_StorageLimitExceeded.)
+					storageLimitExceeded = true;
+
+					// Tell the sysadmin about this
+					NotifySysadmin(NotifyEvent_StoreFull);
+				}
+				else
+				{
+					// The start time of the next run is the end time of this run
+					// This is only done if the storage limit wasn't exceeded (as things won't have been done properly if it was)
+					lastSyncTime = syncPeriodEnd;
+					// unflag the storage full notify flag so that next time the store is full, and alert will be sent
+					mNotificationsSent[NotifyEvent_StoreFull] = false;
+				}
+
+				// Calculate when the next sync run should be
+				nextSyncTime = currentSyncStartTime + updateStoreInterval + Random::RandomInt(updateStoreInterval >> SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
+
+				// Commit the ID Maps
+				CommitIDMapsAfterSync();
+
+				// Log
+				::syslog(LOG_INFO, "Finished scan of local files");
+			}
+			catch(BoxException &e)
+			{
+				errorOccurred = true;
+				errorCode = e.GetType();
+				errorSubCode = e.GetSubType();
+			}
+			catch(...)
+			{
+				// TODO: better handling of exceptions here... need to be very careful
+				errorOccurred = true;
+			}
+
+			if(errorOccurred)
+			{
+				// Is it a berkely db failure?
+				bool isBerkelyDbFailure = (errorCode == BackupStoreException::ExceptionType
+					&& errorSubCode == BackupStoreException::BerkelyDBFailure);
+				if(isBerkelyDbFailure)
+				{
+					// Delete corrupt files
+					DeleteCorruptBerkelyDbFiles();
+				}
+
+				// Clear state data
+				// Go back to beginning of time for the next scan, so everything is
+				// re-examined. (This previously assigned to the local syncPeriodStart,
+				// which immediately goes out of scope and so had no effect.)
+				lastSyncTime = 0;
+				clientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;	// no store marker, so download everything
+				DeleteAllLocations();
+				DeleteAllIDMaps();
+
+				// Handle restart?
+				if(StopRun())
+				{
+					::syslog(LOG_INFO, "Exception (%d/%d) due to signal", errorCode, errorSubCode);
+					return;
+				}
+
+				// If the Berkely db files get corrupted, delete them and try again immediately
+				if(isBerkelyDbFailure)
+				{
+					::syslog(LOG_ERR, "Berkely db inode map files corrupted, deleting and restarting scan. Renamed files and directories will not be tracked until after this scan.\n");
+					::sleep(1);
+				}
+				else
+				{
+					// Not restart/terminate, pause and retry
+					SetState(State_Error);
+					::syslog(LOG_ERR, "Exception caught (%d/%d), reset state and waiting to retry...", errorCode, errorSubCode);
+					::sleep(100);
+				}
+			}
+
+			// Log the stats
+			// NOTE(review): check the argument order against the semantics of
+			// BackupStoreFile::msStats -- the member names suggest the first and
+			// third fields may be swapped relative to the message text.
+			::syslog(LOG_INFO, "File statistics: total file size uploaded %lld, bytes already on server %lld, encoded size %lld",
+				BackupStoreFile::msStats.mBytesInEncodedFiles, BackupStoreFile::msStats.mBytesAlreadyOnServer,
+				BackupStoreFile::msStats.mTotalFileStreamSize);
+			BackupStoreFile::ResetStats();
+
+			// Tell anything connected to the command socket
+			SendSyncStartOrFinish(false /* finish */);
+
+			// Touch a file to record times in filesystem
+			TouchFileInWorkingDir("last_sync_finish");
+		}
+
+		// Set state
+		SetState(storageLimitExceeded?State_StorageLimitExceeded:State_Idle);
+
+	} while(!StopRun());
+
+	// Make sure we have a clean start next time round (if restart)
+	DeleteAllLocations();
+	DeleteAllIDMaps();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::UseScriptToSeeIfSyncAllowed()
+//		Purpose: Private. Runs the configured SyncAllowScript (if any) and
+//		         interprets its first output line. Returns -1 if syncing
+//		         is allowed now, otherwise the number of seconds to wait.
+//		Created: 21/6/04
+//
+// --------------------------------------------------------------------------
+int BackupDaemon::UseScriptToSeeIfSyncAllowed()
+{
+	const Configuration &config(GetConfiguration());
+
+	// Without a configured script, syncing is always permitted
+	if(!config.KeyExists("SyncAllowScript"))
+	{
+		return -1;
+	}
+
+	// If the script produces no usable output, retry in five minutes
+	int delaySeconds = 60*5;
+
+	pid_t scriptPid = 0;
+	try
+	{
+		// Launch the script and capture its output stream
+		std::auto_ptr<IOStream> pscript(LocalProcessStream(config.GetKeyValue("SyncAllowScript").c_str(), scriptPid));
+
+		// Read the first line of output; 30 seconds should be plenty
+		IOStreamGetLine getLine(*pscript);
+		std::string line;
+		if(getLine.GetLine(line, true, 30000))
+		{
+			if(line == "now")
+			{
+				// The script says sync may proceed immediately
+				delaySeconds = -1;
+			}
+			else
+			{
+				// Otherwise the line is the number of seconds to delay by
+				delaySeconds = BoxConvert::Convert<int32_t, const std::string&>(line);
+				::syslog(LOG_INFO, "Delaying sync by %d seconds (SyncAllowScript '%s')", delaySeconds, config.GetKeyValue("SyncAllowScript").c_str());
+			}
+		}
+
+		// Reap the child process
+		int status = 0;
+		::waitpid(scriptPid, &status, 0);
+	}
+	catch(...)
+	{
+		// Best effort only: log the failure and fall back to the default delay
+		::syslog(LOG_ERR, "Error running SyncAllowScript '%s'", config.GetKeyValue("SyncAllowScript").c_str());
+		// Still reap the child if it was started
+		if(scriptPid != 0)
+		{
+			int status = 0;
+			::waitpid(scriptPid, &status, 0);
+		}
+	}
+
+	return delaySeconds;
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::WaitOnCommandSocket(box_time_t, bool &, bool &)
+//		Purpose: Waits on the command socket for a time of UP TO the required time
+//		         but may be much less, and handles a command if necessary.
+//		         Sets DoSyncFlagOut/SyncIsForcedOut when a "sync" or
+//		         "force-sync" command arrives; never clears them.
+//		Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFlagOut, bool &SyncIsForcedOut)
+{
+	ASSERT(mpCommandSocketInfo != 0);
+	if(mpCommandSocketInfo == 0) {::sleep(1); return;} // failure case isn't too bad
+
+	TRACE1("Wait on command socket, delay = %lld\n", RequiredDelay);
+
+	try
+	{
+		// Timeout value for connections and things
+		int timeout = ((int)BoxTimeToMilliSeconds(RequiredDelay)) + 1;
+		// Handle bad boundary cases
+		if(timeout <= 0) timeout = 1;
+		// INFTIM would block forever; cap at 100 seconds instead
+		if(timeout == INFTIM) timeout = 100000;
+
+		// Wait for socket connection, or handle a command?
+		if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+		{
+			// No connection, listen for a new one
+			mpCommandSocketInfo->mpConnectedSocket.reset(mpCommandSocketInfo->mListeningSocket.Accept(timeout).release());
+
+			if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+			{
+				// If a connection didn't arrive, there was a timeout, which means we've
+				// waited long enough and it's time to go.
+				return;
+			}
+			else
+			{
+#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
+				// No way to check the peer on this platform -- accept it, but warn loudly
+				bool uidOK = true;
+				::syslog(LOG_ERR, "On this platform, no security check can be made on the credientials of peers connecting to the command socket. (bbackupctl)");
+#else
+				// Security check -- does the process connecting to this socket have
+				// the same UID as this process?
+				bool uidOK = false;
+				// BLOCK
+				{
+					uid_t remoteEUID = 0xffff;
+					gid_t remoteEGID = 0xffff;
+					if(mpCommandSocketInfo->mpConnectedSocket->GetPeerCredentials(remoteEUID, remoteEGID))
+					{
+						// Credentials are available -- check UID
+						if(remoteEUID == ::getuid())
+						{
+							// Acceptable
+							uidOK = true;
+						}
+					}
+				}
+#endif
+
+				// Is this an acceptible connection?
+				if(!uidOK)
+				{
+					// Dump the connection
+					::syslog(LOG_ERR, "Incoming command connection from peer had different user ID than this process, or security check could not be completed.");
+					mpCommandSocketInfo->mpConnectedSocket.reset();
+					return;
+				}
+				else
+				{
+					// Log
+					::syslog(LOG_INFO, "Connection from command socket");
+
+					// Send a header line summarising the configuration and current state
+					// (256 bytes is ample for five formatted integers plus the fixed text)
+					const Configuration &conf(GetConfiguration());
+					char summary[256];
+					int summarySize = sprintf(summary, "bbackupd: %d %d %d %d\nstate %d\n",
+						conf.GetKeyValueBool("AutomaticBackup"),
+						conf.GetKeyValueInt("UpdateStoreInterval"),
+						conf.GetKeyValueInt("MinimumFileAge"),
+						conf.GetKeyValueInt("MaxUploadWait"),
+						mState);
+					mpCommandSocketInfo->mpConnectedSocket->Write(summary, summarySize);
+
+					// Set the timeout to something very small, so we don't wait too long on waiting
+					// for any incoming data
+					timeout = 10; // milliseconds
+				}
+			}
+		}
+
+		// So there must be a connection now.
+		ASSERT(mpCommandSocketInfo->mpConnectedSocket.get() != 0);
+
+		// Is there a getline object ready?
+		if(mpCommandSocketInfo->mpGetLine == 0)
+		{
+			// Create a new one
+			mpCommandSocketInfo->mpGetLine = new IOStreamGetLine(*(mpCommandSocketInfo->mpConnectedSocket.get()));
+		}
+
+		// Ping the remote side, to provide errors which will mean the socket gets closed
+		mpCommandSocketInfo->mpConnectedSocket->Write("ping\n", 5);
+
+		// Wait for a command or something on the socket
+		// (mpGetLine is re-checked each iteration because a "quit" command
+		// deletes it via CloseCommandConnection below)
+		std::string command;
+		while(mpCommandSocketInfo->mpGetLine != 0 && !mpCommandSocketInfo->mpGetLine->IsEOF()
+			&& mpCommandSocketInfo->mpGetLine->GetLine(command, false /* no preprocessing */, timeout))
+		{
+			TRACE1("Receiving command '%s' over command socket\n", command.c_str());
+
+			bool sendOK = false;
+			bool sendResponse = true;
+
+			// Command to process!
+			if(command == "quit" || command == "")
+			{
+				// Close the socket.
+				CloseCommandConnection();
+				sendResponse = false;
+			}
+			else if(command == "sync")
+			{
+				// Sync now!
+				DoSyncFlagOut = true;
+				SyncIsForcedOut = false;
+				sendOK = true;
+			}
+			else if(command == "force-sync")
+			{
+				// Sync now (forced -- overrides any SyncAllowScript)
+				DoSyncFlagOut = true;
+				SyncIsForcedOut = true;
+				sendOK = true;
+			}
+			else if(command == "reload")
+			{
+				// Reload the configuration
+				SetReloadConfigWanted();
+				sendOK = true;
+			}
+			else if(command == "terminate")
+			{
+				// Terminate the daemon cleanly
+				SetTerminateWanted();
+				sendOK = true;
+			}
+
+			// Send a response back?
+			if(sendResponse)
+			{
+				// byte counts match the literals: "ok\n" is 3, "error\n" is 6
+				mpCommandSocketInfo->mpConnectedSocket->Write(sendOK?"ok\n":"error\n", sendOK?3:6);
+			}
+
+			// Set timeout to something very small, so this just checks for data which is waiting
+			timeout = 1;
+		}
+
+		// Close on EOF?
+		if(mpCommandSocketInfo->mpGetLine != 0 && mpCommandSocketInfo->mpGetLine->IsEOF())
+		{
+			CloseCommandConnection();
+		}
+	}
+	catch(...)
+	{
+		// If an error occurs, and there is a connection active, just close that
+		// connection and continue. Otherwise, let the error propagate.
+		if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+		{
+			throw;
+		}
+		else
+		{
+			// Close socket and ignore error
+			CloseCommandConnection();
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::CloseCommandConnection()
+//		Purpose: Drops the current command socket connection and its line
+//		         reader, swallowing any errors raised while doing so.
+//		Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::CloseCommandConnection()
+{
+	try
+	{
+		TRACE0("Closing command connection\n");
+
+		// Dispose of the line reader first, as it references the socket.
+		// (deleting a null pointer is harmless)
+		IOStreamGetLine *pline = mpCommandSocketInfo->mpGetLine;
+		mpCommandSocketInfo->mpGetLine = 0;
+		delete pline;
+
+		// Then drop the connection itself
+		mpCommandSocketInfo->mpConnectedSocket.reset();
+	}
+	catch(...)
+	{
+		// Never let a failure here propagate
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::SendSyncStartOrFinish(bool)
+//		Purpose: Send a start or finish sync message to the command socket,
+//		         if it's connected. Errors close the connection silently.
+//		Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SendSyncStartOrFinish(bool SendStart)
+{
+	// The bbackupctl program can't rely on a state change, because it may never
+	// change if the server doesn't need to be contacted.
+
+	if(mpCommandSocketInfo == 0 || mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+	{
+		// Nobody listening -- nothing to do
+		return;
+	}
+
+	try
+	{
+		// byte counts match the literals: "start-sync\n" is 11, "finish-sync\n" is 12
+		const char *message = SendStart?"start-sync\n":"finish-sync\n";
+		mpCommandSocketInfo->mpConnectedSocket->Write(message, SendStart?11:12);
+	}
+	catch(...)
+	{
+		// Writing failed -- drop the connection, ignoring further errors
+		CloseCommandConnection();
+	}
+}
+
+
+
+
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+	// Comparison predicate for mount point paths, used when mount points
+	// are discovered by reading /etc/mtab rather than asked of the OS.
+	// Orders longer paths first so that, when scanning for a prefix match,
+	// the most specific mount point is found before its parents; equal
+	// length paths fall back to the natural string order, keeping the
+	// ordering strict (required by std::set).
+	struct mntLenCompare
+	{
+		// const-qualified so the comparator can be invoked through a const
+		// container/comparator object, as some standard libraries require
+		bool operator()(const std::string &s1, const std::string &s2) const
+		{
+			if(s1.size() == s2.size())
+			{
+				// Equal size, sort according to natural sort order
+				return s1 < s2;
+			}
+			else
+			{
+				// Make sure longer strings go first
+				return s1.size() > s2.size();
+			}
+		}
+	};
+#endif
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::SetupLocations(BackupClientContext &, const Configuration &)
+//		Purpose: Makes sure that the list of directories records is correctly set up.
+//		         Builds one Location per configured backup location, assigns each
+//		         an inode ID map index keyed on the mount point it lives on,
+//		         creates missing location directories on the store, and records
+//		         store root entries no longer referenced by any location so they
+//		         can be deleted later. No-op if mLocations is already populated.
+//		Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Configuration &rLocationsConf)
+{
+	if(!mLocations.empty())
+	{
+		// Looks correctly set up
+		return;
+	}
+
+	// Make sure that if a directory is reinstated, then it doesn't get deleted
+	mDeleteUnusedRootDirEntriesAfter = 0;
+	mUnusedRootDirEntries.clear();
+
+	// Just a check to make sure it's right.
+	DeleteAllLocations();
+
+	// Going to need a copy of the root directory. Get a connection, and fetch it.
+	BackupProtocolClient &connection(rClientContext.GetConnection());
+
+	// Ask server for a list of everything in the root directory, which is a directory itself
+	std::auto_ptr<BackupProtocolClientSuccess> dirreply(connection.QueryListDirectory(
+			BackupProtocolClientListDirectory::RootDirectory,
+			BackupProtocolClientListDirectory::Flags_Dir,	// only directories
+			BackupProtocolClientListDirectory::Flags_Deleted | BackupProtocolClientListDirectory::Flags_OldVersion, // exclude old/deleted stuff
+			false /* no attributes */));
+
+	// Retrieve the directory from the stream following
+	BackupStoreDirectory dir;
+	std::auto_ptr<IOStream> dirstream(connection.ReceiveStream());
+	dir.ReadFromStream(*dirstream, connection.GetTimeout());
+
+	// Map of mount names to ID map index
+	std::map<std::string, int> mounts;
+	int numIDMaps = 0;
+
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+	// Linux can't tell you where a directory is mounted. So we have to
+	// read the mount entries from /etc/mtab! Bizarre that the OS itself
+	// can't tell you, but there you go.
+	std::set<std::string, mntLenCompare> mountPoints;
+	// BLOCK
+	FILE *mountPointsFile = 0;
+	try
+	{
+		// Open mounts file
+		mountPointsFile = ::setmntent("/etc/mtab", "r");
+		if(mountPointsFile == 0)
+		{
+			THROW_EXCEPTION(CommonException, OSFileError);
+		}
+
+		// Read all the entries, and put them in the set
+		struct mntent *entry = 0;
+		while((entry = ::getmntent(mountPointsFile)) != 0)
+		{
+			TRACE1("Found mount point at %s\n", entry->mnt_dir);
+			mountPoints.insert(std::string(entry->mnt_dir));
+		}
+
+		// Close mounts file
+		::endmntent(mountPointsFile);
+	}
+	catch(...)
+	{
+		// Make sure the mtab handle isn't leaked on error
+		if(mountPointsFile != 0)
+		{
+			::endmntent(mountPointsFile);
+		}
+		throw;
+	}
+	// Check sorting and that things are as we expect
+	// (mntLenCompare puts longer paths first, so "/" should be last)
+	ASSERT(mountPoints.size() > 0);
+#ifndef NDEBUG
+	{
+		std::set<std::string, mntLenCompare>::const_reverse_iterator i(mountPoints.rbegin());
+		ASSERT(*i == "/");
+	}
+#endif // n NDEBUG
+#endif // PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+
+	// Then... go through each of the entries in the configuration,
+	// making sure there's a directory created for it.
+	for(std::list<std::pair<std::string, Configuration> >::const_iterator i = rLocationsConf.mSubConfigurations.begin();
+		i != rLocationsConf.mSubConfigurations.end(); ++i)
+	{
+TRACE0("new location\n");
+		// Create a record for it
+		// NOTE(review): presumably the Location destructor releases the exclude
+		// lists assigned below -- confirm against Location's definition.
+		Location *ploc = new Location;
+		try
+		{
+			// Setup names in the location record
+			ploc->mName = i->first;
+			ploc->mPath = i->second.GetKeyValue("Path");
+
+			// Read the exclude lists from the Configuration
+			ploc->mpExcludeFiles = BackupClientMakeExcludeList_Files(i->second);
+			ploc->mpExcludeDirs = BackupClientMakeExcludeList_Dirs(i->second);
+
+			// Do a fsstat on the pathname to find out which mount it's on
+			{
+#ifdef PLATFORM_USES_MTAB_FILE_FOR_MOUNTS
+				// Warn in logs if the directory isn't absolute
+				if(ploc->mPath[0] != '/')
+				{
+					::syslog(LOG_ERR, "Location path '%s' isn't absolute", ploc->mPath.c_str());
+				}
+				// Go through the mount points found, and find a suitable one
+				std::string mountName("/");
+				{
+					// (this inner iterator 'i' deliberately shadows the outer
+					// configuration iterator for the rest of this block)
+					std::set<std::string, mntLenCompare>::const_iterator i(mountPoints.begin());
+					TRACE1("%d potential mount points\n", mountPoints.size());
+					for(; i != mountPoints.end(); ++i)
+					{
+						// Compare first n characters with the filename
+						// If it matches, the file belongs in that mount point
+						// (sorting order ensures this)
+						TRACE1("checking against mount point %s\n", i->c_str());
+						if(::strncmp(i->c_str(), ploc->mPath.c_str(), i->size()) == 0)
+						{
+							// Match
+							mountName = *i;
+							break;
+						}
+					}
+					TRACE2("mount point chosen for %s is %s\n", ploc->mPath.c_str(), mountName.c_str());
+				}
+#else
+				// BSD style statfs -- includes mount point, which is nice.
+				struct statfs s;
+				if(::statfs(ploc->mPath.c_str(), &s) != 0)
+				{
+					THROW_EXCEPTION(CommonException, OSFileError)
+				}
+
+				// Where the filesystem is mounted
+				std::string mountName(s.f_mntonname);
+#endif
+
+				// Got it?
+				std::map<std::string, int>::iterator f(mounts.find(mountName));
+				if(f != mounts.end())
+				{
+					// Yes -- store the index
+					ploc->mIDMapIndex = f->second;
+				}
+				else
+				{
+					// No -- new index
+					ploc->mIDMapIndex = numIDMaps;
+					mounts[mountName] = numIDMaps;
+
+					// Store the mount name
+					mIDMapMounts.push_back(mountName);
+
+					// Increment number of maps
+					++numIDMaps;
+				}
+			}
+
+			// Does this exist on the server?
+			BackupStoreDirectory::Iterator iter(dir);
+			BackupStoreFilenameClear dirname(ploc->mName);	// generate the filename
+			BackupStoreDirectory::Entry *en = iter.FindMatchingClearName(dirname);
+			int64_t oid = 0;
+			if(en != 0)
+			{
+				oid = en->GetObjectID();
+
+				// Delete the entry from the directory, so we get a list of
+				// unused root directories at the end of this.
+				dir.DeleteEntry(oid);
+			}
+			else
+			{
+				// Doesn't exist, so it has to be created on the server. Let's go!
+				// First, get the directory's attributes and modification time
+				box_time_t attrModTime = 0;
+				BackupClientFileAttributes attr;
+				attr.ReadAttributes(ploc->mPath.c_str(), true /* directories have zero mod times */,
+					0 /* not interested in mod time */, &attrModTime /* get the attribute modification time */);
+
+				// Execute create directory command
+				MemBlockStream attrStream(attr);
+				std::auto_ptr<BackupProtocolClientSuccess> dirCreate(connection.QueryCreateDirectory(
+					BackupProtocolClientListDirectory::RootDirectory,
+					attrModTime, dirname, attrStream));
+
+				// Object ID for later creation
+				oid = dirCreate->GetObjectID();
+			}
+
+			// Create and store the directory object for the root of this location
+			ASSERT(oid != 0);
+			BackupClientDirectoryRecord *precord = new BackupClientDirectoryRecord(oid, i->first);
+			ploc->mpDirectoryRecord.reset(precord);
+
+			// Push it back on the vector of locations
+			mLocations.push_back(ploc);
+		}
+		catch(...)
+		{
+			// Don't leak a partially-built location record
+			delete ploc;
+			ploc = 0;
+			throw;
+		}
+	}
+
+	// Any entries in the root directory which need deleting?
+	if(dir.GetNumberOfEntries() > 0)
+	{
+		::syslog(LOG_INFO, "%d redundant locations in root directory found, will delete from store after %d seconds.",
+			dir.GetNumberOfEntries(), BACKUP_DELETE_UNUSED_ROOT_ENTRIES_AFTER);
+
+		// Store directories in list of things to delete
+		mUnusedRootDirEntries.clear();
+		BackupStoreDirectory::Iterator iter(dir);
+		BackupStoreDirectory::Entry *en = 0;
+		while((en = iter.Next()) != 0)
+		{
+			// Add name to list
+			BackupStoreFilenameClear clear(en->GetName());
+			const std::string &name(clear.GetClearFilename());
+			mUnusedRootDirEntries.push_back(std::pair<int64_t,std::string>(en->GetObjectID(), name));
+			// Log this
+			::syslog(LOG_INFO, "Unused location in root: %s", name.c_str());
+		}
+		ASSERT(mUnusedRootDirEntries.size() > 0);
+		// Time to delete them
+		mDeleteUnusedRootDirEntriesAfter =
+			GetCurrentBoxTime() + SecondsToBoxTime((uint32_t)BACKUP_DELETE_UNUSED_ROOT_ENTRIES_AFTER);
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::SetupIDMapsForSync()
+//		Purpose: Sets up ID maps for the sync process -- make sure they're all there.
+//		         The "new" maps are always recreated empty; the "current" maps
+//		         carry forward the previous sync's inode-to-ID mappings.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetupIDMapsForSync()
+{
+	// Need to do different things depending on whether it's an in memory implementation,
+	// or whether it's all stored on disc.
+
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+
+	// Make sure we have some blank, empty ID maps
+	DeleteIDMapVector(mNewIDMaps);
+	FillIDMapVector(mNewIDMaps, true /* new maps */);
+
+	// Then make sure that the current maps have objects, even if they are empty
+	// (for the very first run)
+	if(mCurrentIDMaps.empty())
+	{
+		FillIDMapVector(mCurrentIDMaps, false /* current maps */);
+	}
+
+#else
+
+	// On-disc implementation: reopen both sets every sync -- the current
+	// maps read-only from disc, the new maps freshly created
+	DeleteIDMapVector(mNewIDMaps);
+	FillIDMapVector(mNewIDMaps, true /* new maps */);
+	DeleteIDMapVector(mCurrentIDMaps);
+	FillIDMapVector(mCurrentIDMaps, false /* current maps */);
+
+#endif
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::FillIDMapVector(std::vector<BackupClientInodeToIDMap *> &, bool)
+//		Purpose: Fills the (empty) vector with one ID map per known mount point.
+//		         New maps are created under a ".n" suffix; current maps are
+//		         opened read-only, or opened empty if no file exists yet.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::FillIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector, bool NewMaps)
+{
+	ASSERT(rVector.size() == 0);
+	rVector.reserve(mIDMapMounts.size());
+
+	for(unsigned int mount = 0; mount < mIDMapMounts.size(); ++mount)
+	{
+		BackupClientInodeToIDMap *pmap = new BackupClientInodeToIDMap();
+		try
+		{
+			// Base filename for this mount's map; a new map gets a ".n" suffix
+			std::string mapFilename;
+			MakeMapBaseName(mount, mapFilename);
+			if(NewMaps)
+			{
+				mapFilename += ".n";
+			}
+
+			if(!NewMaps && !FileExists(mapFilename.c_str()))
+			{
+				// A current map which has never been written -- use an empty one
+				pmap->OpenEmpty();
+			}
+			else
+			{
+				pmap->Open(mapFilename.c_str(), !NewMaps /* read only */, NewMaps /* create new */);
+			}
+
+			rVector.push_back(pmap);
+		}
+		catch(...)
+		{
+			// Don't leak the map object if setup fails
+			delete pmap;
+			throw;
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::DeleteCorruptBerkelyDbFiles()
+//		Purpose: Removes the on-disc Berkely db inode map files (both the
+//		         current map and its ".n" in-progress counterpart) for every
+//		         mount, after corruption has been detected. Errors from
+//		         unlink are ignored.
+//		Created: 14/9/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteCorruptBerkelyDbFiles()
+{
+	for(unsigned int mount = 0; mount < mIDMapMounts.size(); ++mount)
+	{
+		// Base filename of this mount's map
+		std::string base;
+		MakeMapBaseName(mount, base);
+
+		// Remove the current map...
+		TRACE1("Deleting %s\n", base.c_str());
+		::unlink(base.c_str());
+
+		// ...and the in-progress (".n") map
+		std::string newmap(base + ".n");
+		TRACE1("Deleting %s\n", newmap.c_str());
+		::unlink(newmap.c_str());
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    MakeMapBaseName(unsigned int, std::string &)
+//		Purpose: Builds the base filename for the inode map of the given
+//		         mount: <DataDirectory>/mnt<flattened mount path>, where
+//		         directory separators in the mount path become underscores.
+//		Created: 20/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::MakeMapBaseName(unsigned int MountNumber, std::string &rNameOut) const
+{
+	// Maps live in the configured data directory
+	const Configuration &config(GetConfiguration());
+	std::string dir(config.GetKeyValue("DataDirectory"));
+
+	// Flatten the mount point path into a single leafname by turning
+	// every directory separator into an underscore
+	std::string leaf(mIDMapMounts[MountNumber]);
+	for(std::string::size_type pos = leaf.find(DIRECTORY_SEPARATOR_ASCHAR);
+		pos != std::string::npos;
+		pos = leaf.find(DIRECTORY_SEPARATOR_ASCHAR, pos + 1))
+	{
+		leaf[pos] = '_';
+	}
+
+	// Assemble the final filename
+	rNameOut = dir + DIRECTORY_SEPARATOR "mnt" + leaf;
+}
+
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::CommitIDMapsAfterSync()
+//		Purpose: Commits the new ID maps, so the 'new' maps are now the
+//		         'current' maps. Strategy depends on whether maps are held
+//		         in memory or on disc.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::CommitIDMapsAfterSync()
+{
+#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
+	// In memory: the "new" map objects simply become the "current" ones
+	DeleteIDMapVector(mCurrentIDMaps);
+	mCurrentIDMaps = mNewIDMaps;
+	// The pointers are now owned via mCurrentIDMaps -- clear, don't delete
+	mNewIDMaps.clear();
+
+#else
+
+	// On disc: close everything, then rename each in-progress ".n" file
+	// over the corresponding current map file
+	DeleteIDMapVector(mCurrentIDMaps);
+	DeleteIDMapVector(mNewIDMaps);
+
+	for(unsigned int mount = 0; mount < mIDMapMounts.size(); ++mount)
+	{
+		std::string current;
+		MakeMapBaseName(mount, current);
+		std::string inProgress(current + ".n");
+
+		// Atomically replace the current map with the new one
+		if(::rename(inProgress.c_str(), current.c_str()) != 0)
+		{
+			THROW_EXCEPTION(CommonException, OSFileError)
+		}
+	}
+
+#endif
+}
+
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::DeleteIDMapVector(std::vector<BackupClientInodeToIDMap *> &)
+//		Purpose: Closes and deletes every ID map in the vector, leaving it
+//		         empty. Maps are disposed of back-to-front.
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector)
+{
+	// Dispose of the maps in reverse order, mirroring the original pop-based loop
+	while(rVector.empty() == false)
+	{
+		BackupClientInodeToIDMap *pmap = rVector.back();
+		rVector.pop_back();
+
+		// Close the map before releasing it
+		pmap->Close();
+		delete pmap;
+	}
+	ASSERT(rVector.size() == 0);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::FindLocationPathName(const std::string &, std::string &) const
+//		Purpose: Tries to find the path of the root of a backup location. Returns true (and path in rPathOut)
+//				 if it can be found, false otherwise.
+//		Created: 12/11/03
+//
+// --------------------------------------------------------------------------
+bool BackupDaemon::FindLocationPathName(const std::string &rLocationName, std::string &rPathOut) const
+{
+	// Linear search through the configured locations for a matching name
+	std::vector<Location *>::const_iterator loc(mLocations.begin());
+	for(; loc != mLocations.end(); ++loc)
+	{
+		if((*loc)->mName == rLocationName)
+		{
+			// Found -- hand the path back to the caller
+			rPathOut = (*loc)->mPath;
+			return true;
+		}
+	}
+
+	// No location of that name is configured
+	return false;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::SetState(int)
+//		Purpose: Record current action of daemon, and update process title
+//				 to reflect this. Also informs anything connected to the
+//				 command socket of the new state.
+//		Created: 11/12/03
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::SetState(int State)
+{
+	// Two little checks -- ignore no-op transitions, and the special
+	// initialising state (-1) which has no textual representation
+	if(State == mState) return;
+	if(State < 0) return;
+
+	// Update
+	mState = State;
+
+	// Set process title -- these entries must be kept in sync with the
+	// state enum in BackupDaemon.h. Guard the index so a future state
+	// added to the enum without a matching string can't read off the
+	// end of the array.
+	const static char *stateText[] = {"idle", "connected", "error -- waiting for retry", "over limit on server -- not backing up"};
+	if(State < (int)(sizeof(stateText) / sizeof(stateText[0])))
+	{
+		SetProcessTitle(stateText[State]);
+	}
+
+	// If there's a command socket connected, then inform it -- disconnecting from the
+	// command socket if there's an error
+	if(mpCommandSocketInfo != 0 && mpCommandSocketInfo->mpConnectedSocket.get() != 0)
+	{
+		// Something connected to the command socket, tell it about the new state.
+		// Use snprintf so the buffer can never overflow, and keep the length in
+		// an int (the printf return type) rather than truncating it into a char.
+		char newState[64];
+		int newStateSize = ::snprintf(newState, sizeof(newState), "state %d\n", State);
+		try
+		{
+			mpCommandSocketInfo->mpConnectedSocket->Write(newState, newStateSize);
+		}
+		catch(...)
+		{
+			CloseCommandConnection();
+		}
+	}
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::TouchFileInWorkingDir(const char *)
+//		Purpose: Make sure a zero length file of the name exists in the working directory.
+//				 Use for marking times of events in the filesystem.
+//		Created: 21/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::TouchFileInWorkingDir(const char *Filename)
+{
+	// Build the full pathname within the configured data directory
+	const Configuration &config(GetConfiguration());
+	std::string fn(config.GetKeyValue("DataDirectory"));
+	fn += DIRECTORY_SEPARATOR_ASCHAR;
+	fn += Filename;
+
+	// Creating (or truncating) the file updates its timestamp; the stream
+	// object closes it again as soon as it goes out of scope
+	FileStream touch(fn.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::NotifySysadmin(int)
+// Purpose: Run the script to tell the sysadmin about events which need attention.
+// Created: 25/2/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::NotifySysadmin(int Event)
+{
+ // Event names indexed by the NotifyEvent_* enum values; the trailing null
+ // is only a terminator, it is never indexed (Event <= NotifyEvent__MAX)
+ static const char *sEventNames[] = {"store-full", "read-error", 0};
+
+ TRACE1("BackupDaemon::NotifySysadmin() called, event = %d\n", Event);
+
+ // Reject out of range event codes before using them as an array index
+ if(Event < 0 || Event > NotifyEvent__MAX)
+ {
+ THROW_EXCEPTION(BackupStoreException, BadNotifySysadminEventCode);
+ }
+
+ // Don't send lots of repeated messages
+ if(mNotificationsSent[Event])
+ {
+ return;
+ }
+
+ // Is there a notification script?
+ const Configuration &conf(GetConfiguration());
+ if(!conf.KeyExists("NotifyScript"))
+ {
+ // Log, and then return
+ ::syslog(LOG_ERR, "Not notifying administrator about event %s -- set NotifyScript to do this in future", sEventNames[Event]);
+ return;
+ }
+
+ // Script to run -- the event name is passed as the script's argument
+ std::string script(conf.GetKeyValue("NotifyScript") + ' ' + sEventNames[Event]);
+
+ // Log what we're about to do
+ ::syslog(LOG_INFO, "About to notify administrator about event %s, running script '%s'", sEventNames[Event], script.c_str());
+
+ // Then do it
+ if(::system(script.c_str()) != 0)
+ {
+ ::syslog(LOG_ERR, "Notify script returned an error code. ('%s')", script.c_str());
+ }
+
+ // Flag that this is done so the administrator isn't constantly bombarded with lots of errors
+ mNotificationsSent[Event] = true;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::DeleteUnusedRootDirEntries(BackupClientContext &)
+//		Purpose: Deletes any unused entries in the root directory, if they're scheduled to be deleted.
+//		Created: 13/5/04
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::DeleteUnusedRootDirEntries(BackupClientContext &rContext)
+{
+	// Anything scheduled for deletion?
+	if(mUnusedRootDirEntries.empty() || mDeleteUnusedRootDirEntriesAfter == 0)
+	{
+		// Nothing to do.
+		return;
+	}
+
+	// Check time
+	if(GetCurrentBoxTime() < mDeleteUnusedRootDirEntriesAfter)
+	{
+		// Too early to delete files
+		return;
+	}
+
+	// Entries to delete, and it's the right time to do so...
+	::syslog(LOG_INFO, "Deleting unused locations from store root...");
+	BackupProtocolClient &connection(rContext.GetConnection());
+	for(std::vector<std::pair<int64_t,std::string> >::iterator i(mUnusedRootDirEntries.begin()); i != mUnusedRootDirEntries.end(); ++i)
+	{
+		connection.QueryDeleteDirectory(i->first);
+
+		// Log this -- cast the int64_t explicitly so it always matches the
+		// %llx length modifier (int64_t may be plain long on some platforms,
+		// which would make the varargs call undefined)
+		::syslog(LOG_INFO, "Deleted %s (ID %08llx) from store root", i->second.c_str(), (unsigned long long)(i->first));
+	}
+
+	// Reset state so the deletions aren't attempted again
+	mDeleteUnusedRootDirEntriesAfter = 0;
+	mUnusedRootDirEntries.clear();
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::Location::Location()
+// Purpose: Constructor
+// Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupDaemon::Location::Location()
+ : mIDMapIndex(0),
+ mpExcludeFiles(0),
+ mpExcludeDirs(0)
+{
+ // Exclude lists start null; if they are ever set, this object owns them
+ // and the destructor deletes them.
+}
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::Location::~Location()
+//		Purpose: Destructor -- releases the owned exclude lists
+//		Created: 11/11/03
+//
+// --------------------------------------------------------------------------
+BackupDaemon::Location::~Location()
+{
+	// Clean up exclude locations. delete of a null pointer is a no-op,
+	// so no explicit checks are needed; the pointers are zeroed afterwards
+	// as in the rest of this file.
+	delete mpExcludeDirs;
+	mpExcludeDirs = 0;
+
+	delete mpExcludeFiles;
+	mpExcludeFiles = 0;
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::CommandSocketInfo::CommandSocketInfo()
+// Purpose: Constructor
+// Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+BackupDaemon::CommandSocketInfo::CommandSocketInfo()
+ : mpGetLine(0)
+{
+ // The line reader starts null; this object owns it once set, and the
+ // destructor deletes it.
+}
+
+
+// --------------------------------------------------------------------------
+//
+// Function
+//		Name:    BackupDaemon::CommandSocketInfo::~CommandSocketInfo()
+//		Purpose: Destructor -- releases the owned line reader
+//		Created: 18/2/04
+//
+// --------------------------------------------------------------------------
+BackupDaemon::CommandSocketInfo::~CommandSocketInfo()
+{
+	// delete of a null pointer is harmless, so no check is required
+	delete mpGetLine;
+	mpGetLine = 0;
+}
+
diff --git a/bin/bbackupd/BackupDaemon.h b/bin/bbackupd/BackupDaemon.h
new file mode 100755
index 00000000..ffaf5783
--- /dev/null
+++ b/bin/bbackupd/BackupDaemon.h
@@ -0,0 +1,166 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupDaemon.h
+// Purpose: Backup daemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPDAEMON__H
+#define BACKUPDAEMON__H
+
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "Daemon.h"
+#include "BoxTime.h"
+#include "Socket.h"
+#include "SocketListen.h"
+#include "SocketStream.h"
+
+class BackupClientDirectoryRecord;
+class BackupClientContext;
+class Configuration;
+class BackupClientInodeToIDMap;
+class ExcludeList;
+class IOStreamGetLine;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: BackupDaemon
+// Purpose: Backup daemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+class BackupDaemon : public Daemon
+{
+public:
+ BackupDaemon();
+ ~BackupDaemon();
+private:
+ // Copying not allowed
+ BackupDaemon(const BackupDaemon &);
+public:
+
+ // Daemon interface
+ void Run();
+ virtual const char *DaemonName() const;
+ virtual const char *DaemonBanner() const;
+ const ConfigurationVerify *GetConfigVerify() const;
+
+ // Look up the local filesystem path of a named backup location;
+ // returns false if no location of that name is configured
+ bool FindLocationPathName(const std::string &rLocationName, std::string &rPathOut) const;
+
+ enum
+ {
+ // Add stuff to this, make sure the textual equivalents in SetState() are changed too.
+ State_Initialising = -1,
+ State_Idle = 0,
+ State_Connected = 1,
+ State_Error = 2,
+ State_StorageLimitExceeded = 3
+ };
+
+ int GetState() {return mState;}
+
+ // Allow other classes to call this too
+ enum
+ {
+ NotifyEvent_StoreFull = 0,
+ NotifyEvent_ReadError = 1,
+ NotifyEvent__MAX = 1
+ // When adding notifications, remember to add strings to NotifySysadmin()
+ };
+ void NotifySysadmin(int Event);
+
+private:
+ void Run2();
+
+ void DeleteAllLocations();
+ void SetupLocations(BackupClientContext &rClientContext, const Configuration &rLocationsConf);
+
+ // Close and delete every map in the vector, leaving it empty
+ void DeleteIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector);
+ void DeleteAllIDMaps()
+ {
+ DeleteIDMapVector(mCurrentIDMaps);
+ DeleteIDMapVector(mNewIDMaps);
+ }
+ void FillIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVector, bool NewMaps);
+
+ void SetupIDMapsForSync();
+ void CommitIDMapsAfterSync();
+ void DeleteCorruptBerkelyDbFiles();
+
+ // Build the base filename of the on-disc ID map for a given mount point
+ void MakeMapBaseName(unsigned int MountNumber, std::string &rNameOut) const;
+
+ void SetState(int State);
+
+ void WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFlagOut, bool &SyncIsForcedOut);
+ void CloseCommandConnection();
+ void SendSyncStartOrFinish(bool SendStart);
+
+ // Create/truncate a zero length file in the working directory, used to
+ // record times of events in the filesystem
+ void TouchFileInWorkingDir(const char *Filename);
+
+ void DeleteUnusedRootDirEntries(BackupClientContext &rContext);
+
+#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
+ // For warning user about potential security hole
+ virtual void SetupInInitialProcess();
+#endif
+
+ int UseScriptToSeeIfSyncAllowed();
+
+private:
+ // One backed-up location: its name, local path, exclusion lists and
+ // the index of its inode-to-ID map
+ class Location
+ {
+ public:
+ Location();
+ ~Location();
+ private:
+ Location(const Location &); // copy not allowed
+ Location &operator=(const Location &);
+ public:
+ std::string mName;
+ std::string mPath;
+ std::auto_ptr<BackupClientDirectoryRecord> mpDirectoryRecord;
+ int mIDMapIndex;
+ ExcludeList *mpExcludeFiles;
+ ExcludeList *mpExcludeDirs;
+ };
+
+ int mState; // what the daemon is currently doing
+
+ std::vector<Location *> mLocations;
+
+ // Mount point names, and the current/new inode-to-ID maps (one per mount)
+ std::vector<std::string> mIDMapMounts;
+ std::vector<BackupClientInodeToIDMap *> mCurrentIDMaps;
+ std::vector<BackupClientInodeToIDMap *> mNewIDMaps;
+
+ // For the command socket
+ class CommandSocketInfo
+ {
+ public:
+ CommandSocketInfo();
+ ~CommandSocketInfo();
+ private:
+ CommandSocketInfo(const CommandSocketInfo &); // no copying
+ CommandSocketInfo &operator=(const CommandSocketInfo &);
+ public:
+ SocketListen<SocketStream, 1 /* listen backlog */> mListeningSocket;
+ std::auto_ptr<SocketStream> mpConnectedSocket;
+ IOStreamGetLine *mpGetLine;
+ };
+
+ // Using a socket?
+ CommandSocketInfo *mpCommandSocketInfo;
+
+ // Stop notifications being repeated.
+ bool mNotificationsSent[NotifyEvent__MAX + 1];
+
+ // Unused entries in the root directory wait a while before being deleted
+ box_time_t mDeleteUnusedRootDirEntriesAfter; // time to delete them
+ std::vector<std::pair<int64_t,std::string> > mUnusedRootDirEntries;
+};
+
+#endif // BACKUPDAEMON__H
+
diff --git a/bin/bbackupd/bbackupd-config b/bin/bbackupd/bbackupd-config
new file mode 100755
index 00000000..c5e52282
--- /dev/null
+++ b/bin/bbackupd/bbackupd-config
@@ -0,0 +1,525 @@
+#!/usr/bin/perl
+use strict;
+
+# should be running as root -- warn but continue, so the script can still be
+# exercised as an ordinary user (file creation will likely fail later)
+if($> != 0)
+{
+ printf "\nWARNING: this should be run as root\n\n"
+}
+
+# Print usage information, optionally followed by the error message passed
+# in $_[0], then exit with status 1.
+sub error_print_usage
+{
+ print <<__E;
+
+Setup bbackupd config utility.
+
+Bad command line parameters.
+Usage:
+ bbackupd-config config-dir backup-mode account-num server-hostname working-dir backup-dir [more backup directories]
+
+config-dir usually /etc/box
+backup-mode is lazy or snapshot
+ lazy mode runs continously, uploading files over a specified age
+ snapshot mode uploads a snapshot of the filesystem when instructed explicitly
+account-num (hexdecimal) and server-hostname as supplied from the server administrator
+working-dir usually /var/bbackupd
+backup-dir, list of directories to back up
+
+__E
+ print "=========\nERROR:\n",$_[0],"\n\n" if $_[0] ne '';
+ exit(1);
+}
+
+# check and get command line parameters -- need config-dir, backup-mode,
+# account-num, server-hostname, working-dir plus at least one backup
+# directory, i.e. five or more arguments
+if($#ARGV < 4)
+{
+ error_print_usage();
+}
+
+# check for OPENSSL_CONF environment var being set
+if(exists $ENV{'OPENSSL_CONF'})
+{
+ print <<__E;
+
+---------------------------------------
+
+WARNING:
+ You have the OPENSSL_CONF environment variable set.
+ Use of non-standard openssl configs may cause problems.
+
+---------------------------------------
+
+__E
+}
+
+# default locations
+my $default_config_location = '/etc/box/bbackupd.conf';
+
+# command line parameters
+my ($config_dir,$backup_mode,$account_num,$server,$working_dir,@tobackup) = @ARGV;
+
+# check backup mode is valid
+if($backup_mode ne 'lazy' && $backup_mode ne 'snapshot')
+{
+ error_print_usage("ERROR: backup mode must be 'lazy' or 'snapshot'");
+}
+
+# check server exists (a test DNS lookup)
+{
+ my @r = gethostbyname($server);
+ if($#r < 0)
+ {
+ error_print_usage("Backup server specified as '$server', but it could not found.\n(A test DNS lookup failed -- check arguments)");
+ }
+}
+
+# the working directory must be given as an absolute path
+if($working_dir !~ m~\A/~)
+{
+ error_print_usage("Working directory $working_dir is not specified as an absolute path");
+}
+
+# ssl stuff -- filenames for the client's key pair and certificates
+my $private_key = "$config_dir/bbackupd/$account_num-key.pem";
+my $certificate_request = "$config_dir/bbackupd/$account_num-csr.pem";
+my $certificate = "$config_dir/bbackupd/$account_num-cert.pem";
+my $ca_root_cert = "$config_dir/bbackupd/serverCA.pem";
+
+# encryption keys
+my $enc_key_file = "$config_dir/bbackupd/$account_num-FileEncKeys.raw";
+
+# other files
+my $config_file = "$config_dir/bbackupd.conf";
+my $notify_script = "$config_dir/bbackupd/NotifySysadmin.sh";
+
+# check that the directories are allowable
+for(@tobackup)
+{
+ if($_ eq '/')
+ {
+ die "It is not recommended that you backup the root directory of your disc";
+ }
+ if($_ !~ m/\A\//)
+ {
+ die "Directory $_ is not specified as an absolute path";
+ }
+ if(!-d $_)
+ {
+ die "$_ is not a directory";
+ }
+}
+
+# summarise configuration for the user before any files are written
+
+print <<__E;
+
+Setup bbackupd config utility.
+
+Configuration:
+ Writing configuration file: $config_file
+ Account: $account_num
+ Server hostname: $server
+ Directories to back up:
+__E
+print ' ',$_,"\n" for(@tobackup);
+print <<__E;
+
+Note: If other file systems are mounted inside these directories, then problems may occur
+with files on the store server being renamed incorrectly. This will cause efficiency
+problems, but not affect the integrity of the backups.
+
+WARNING: Directories not checked against mountpoints. Check mounted filesystems manually.
+
+__E
+
+# create directories (config dir is world readable, the bbackupd subdirectory
+# and working directory hold key material so are owner-only)
+if(!-d $config_dir)
+{
+ printf "Creating $config_dir...\n";
+ mkdir $config_dir,0755 or die "Can't create $config_dir";
+}
+
+if(!-d "$config_dir/bbackupd")
+{
+ printf "Creating $config_dir/bbackupd\n";
+ mkdir "$config_dir/bbackupd",0700 or die "Can't create $config_dir/bbackupd";
+}
+
+if(!-d "$working_dir")
+{
+ printf "Creating $working_dir\n";
+ if(!mkdir($working_dir,0700))
+ {
+ die "Couldn't create $working_dir -- create this manually and try again\n";
+ }
+}
+
+# generate the private key for the server -- skipped if it already exists,
+# so re-running the script never destroys existing keys
+if(!-f $private_key)
+{
+ print "Generating private key...\n";
+ if(system("openssl genrsa -out $private_key 2048") != 0)
+ {
+ die "Couldn't generate private key."
+ }
+}
+
+# generate a certificate request; the piped heredoc answers openssl's
+# interactive prompts, leaving all fields default except the common name
+if(!-f $certificate_request)
+{
+ die "Couldn't run openssl for CSR generation" unless
+ open(CSR,"|openssl req -new -key $private_key -sha1 -out $certificate_request");
+ print CSR <<__E;
+.
+.
+.
+.
+.
+BACKUP-$account_num
+.
+.
+.
+
+__E
+ close CSR;
+ print "\n\n";
+ die "Certificate request wasn't created.\n" unless -f $certificate_request
+}
+
+# generate the key material for the file
+if(!-f $enc_key_file)
+{
+ print "Generating keys for file backup\n";
+ if(system("openssl rand -out $enc_key_file 1024") != 0)
+ {
+ die "Couldn't generate file backup keys."
+ }
+}
+
+# write the notify when store full script
+print "Writing notify script $notify_script\n";
+open NOTIFY,">$notify_script" or die "Can't open for writing";
+
+# gather values to embed in the script, including a best-effort search
+# for a sendmail binary
+my $hostname = `hostname`; chomp $hostname;
+my $current_username = `whoami`; chomp $current_username;
+my $sendmail = `whereis sendmail`; chomp $sendmail;
+# NOTE(review): this strips a trailing line from multi-line whereis output --
+# presumably BSD-style `whereis`; confirm against the supported platforms
+$sendmail =~ s/\n.\Z//s;
+# for Linux style whereis
+$sendmail = $1 if $sendmail =~ /^sendmail:\s+([\S]+)/;
+# last ditch guess
+$sendmail = 'sendmail' if $sendmail !~ m/\S/;
+
+# write the notification shell script; everything between __EOS markers is
+# the script's literal content, with $hostname, $sendmail etc. interpolated
+# now and the \$-escaped variables left for the shell to expand at run time
+print NOTIFY <<__EOS;
+#!/bin/sh
+
+SUBJECT="BACKUP PROBLEM on host $hostname"
+SENDTO="$current_username"
+
+if [ \$1 = store-full ]
+then
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (store full)
+To: \$SENDTO
+
+
+The store account for $hostname is full.
+
+=============================
+FILES ARE NOT BEING BACKED UP
+=============================
+
+Please adjust the limits on account $account_num on server $server.
+
+EOM
+elif [ \$1 = read-error ]
+then
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (read errors)
+To: \$SENDTO
+
+
+Errors occured reading some files or directories for backup on $hostname.
+
+===================================
+THESE FILES ARE NOT BEING BACKED UP
+===================================
+
+Check the logs on $hostname for the files and directories which caused
+these errors, and take appropraite action.
+
+Other files are being backed up.
+
+EOM
+else
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (unknown)
+To: \$SENDTO
+
+
+The backup daemon on $hostname reported an unknown error.
+
+==========================
+FILES MAY NOT BE BACKED UP
+==========================
+
+Please check the logs on $hostname.
+
+EOM
+fi
+__EOS
+
+close NOTIFY;
+# the script must be executable by bbackupd (running as root)
+chmod 0700,$notify_script or die "Can't chmod $notify_script";
+
+
+# write the configuration file -- first the settings common to both modes
+print "Writing configuration file $config_file\n";
+open CONFIG,">$config_file" or die "Can't open config file for writing";
+print CONFIG <<__E;
+
+StoreHostname = $server
+AccountNumber = 0x$account_num
+KeysFile = $enc_key_file
+
+CertificateFile = $certificate
+PrivateKeyFile = $private_key
+TrustedCAsFile = $ca_root_cert
+
+DataDirectory = $working_dir
+
+
+# This script is run whenever bbackupd encounters a problem which requires
+# the system administrator to assist:
+# 1) The store is full, and no more data can be uploaded.
+# 2) Some files or directories were not readable.
+# The default script emails the system administrator.
+
+NotifyScript = $notify_script
+
+__E
+
+# then the mode-specific timing settings
+if($backup_mode eq 'lazy')
+{
+ # lazy mode configuration
+ print CONFIG <<__E;
+
+# A scan of the local discs will be made once an hour (approximately).
+# To avoid cycles of load on the server, this time is randomly adjusted by a small
+# percentage as the daemon runs.
+
+UpdateStoreInterval = 3600
+
+
+# A file must have been modified at least 6 hours ago before it will be uploaded.
+
+MinimumFileAge = 21600
+
+
+# If a file is modified repeated, it won't be uploaded immediately in case it's modified again.
+# However, it should be uploaded eventually. This is how long we should wait after first noticing
+# a change. (1 day)
+
+MaxUploadWait = 86400
+
+__E
+}
+else
+{
+ # snapshot configuration
+ print CONFIG <<__E;
+
+# This configuration file is written for snapshot mode.
+# You will need to run bbackupctl to instruct the daemon to upload files.
+
+AutomaticBackup = no
+UpdateStoreInterval = 0
+MinimumFileAge = 0
+MaxUploadWait = 0
+
+__E
+}
+
+# remaining common settings, then the start of the BackupLocations section
+print CONFIG <<__E;
+
+# Files above this size (in bytes) are tracked, and if they are renamed they will simply be
+# renamed on the server, rather than being uploaded again. (64k - 1)
+
+FileTrackingSizeThreshold = 65535
+
+
+# The daemon does "changes only" uploads for files above this size (in bytes).
+# Files less than it are uploaded whole without this extra processing.
+
+DiffingUploadSizeThreshold = 8192
+
+
+# The limit on how much time is spent diffing files. Most files shouldn't take very long,
+# but if you have really big files you can use this to limit the time spent diffing them.
+# * Reduce if you are having problems with processor usage.
+# * Increase if you have large files, and think the upload of changes is too large and want
+# to spend more time searching for unchanged blocks.
+
+MaximumDiffingTime = 20
+
+
+# Uncomment this line to see exactly what the daemon is going when it's connected to the server.
+
+# ExtendedLogging = yes
+
+
+# Use this to temporarily stop bbackupd from syncronising or connecting to the store.
+# This specifies a program or script script which is run just before each sync, and ideally
+# the full path to the interpreter. It will be run as the same user bbackupd is running as,
+# usually root.
+# The script prints either "now" or a number to STDOUT (and a terminating newline, no quotes).
+# If the result was "now", then the sync will happen. If it's a number, then the script will
+# be asked again in that number of seconds.
+# For example, you could use this on a laptop to only backup when on a specific network.
+
+# SyncAllowScript = /path/to/intepreter/or/exe script-name parameters etc
+
+
+# Where the command socket is created in the filesystem.
+
+CommandSocket = /var/run/bbackupd.sock
+
+
+Server
+{
+ PidFile = /var/run/bbackupd.pid
+}
+
+#
+# BackupLocations specifies which locations on disc should be backed up. Each
+# directory is in the format
+#
+# name
+# {
+# Path = /path/of/directory
+# (optional exclude directives)
+# }
+#
+# 'name' is derived from the Path by the config script, but should merely be
+# unique.
+#
+# The exclude directives are of the form
+#
+# [Exclude|AlwaysInclude][File|Dir][|sRegex] = regex or full pathname
+#
+# (The regex suffix is shown as 'sRegex' to make File or Dir plural)
+#
+# For example:
+#
+# ExcludeDir = /home/guest-user
+# ExcludeFilesRegex = *.(mp3|MP3)\$
+# AlwaysIncludeFile = /home/username/veryimportant.mp3
+#
+# This excludes the directory /home/guest-user from the backup along with all mp3
+# files, except one MP3 file in particular.
+#
+# In general, Exclude excludes a file or directory, unless the directory is
+# explicitly mentioned in a AlwaysInclude directive.
+#
+# If a directive ends in Regex, then it is a regular expression rather than a
+# explicit full pathname. See
+#
+# man 7 re_format
+#
+# for the regex syntax on your platform.
+#
+
+BackupLocations
+{
+__E
+
+# write the dirs to backup
+for my $d (@tobackup)
+{
+ # derive a location name from the path: drop the leading '/', then turn
+ # the remaining separators into '-'
+ $d =~ m/\A.(.+)\Z/;
+ my $n = $1;
+ $n =~ tr`/`-`;
+
+ # if the encryption keys file lives inside this location, exclude it so
+ # the keys are never uploaded alongside the data they protect
+ my $excludekeys = '';
+ if(substr($enc_key_file, 0, length($d)+1) eq $d.'/')
+ {
+ $excludekeys = "\t\tExcludeFile = $enc_key_file\n";
+ print <<__E;
+
+NOTE: Keys file has been explicitly excluded from the backup.
+
+__E
+ }
+
+ print CONFIG <<__E
+ $n
+ {
+ Path = $d
+$excludekeys }
+__E
+}
+
+print CONFIG "}\n\n";
+close CONFIG;
+
+# explain to the user what they need to do next; daemon/bbackupctl arguments
+# are only needed when the config file is not at the default location
+my $daemon_args = ($config_file eq $default_config_location)?'':" $config_file";
+my $ctl_daemon_args = ($config_file eq $default_config_location)?'':" -c $config_file";
+
+print <<__E;
+
+===================================================================
+
+bbackupd basic configuration complete.
+
+What you need to do now...
+
+1) Make a backup of $enc_key_file
+ This should be a secure offsite backup.
+ Without it, you cannot restore backups. Everything else can
+ be replaced. But this cannot.
+ KEEP IT IN A SAFE PLACE, OTHERWISE YOUR BACKUPS ARE USELESS.
+
+2) Send $certificate_request
+ to the administrator of the backup server, and ask for it to
+ be signed.
+
+3) The administrator will send you two files. Install them as
+ $certificate
+ $ca_root_cert
+ after checking their authenticity.
+
+4) You may wish to read the configuration file
+ $config_file
+ and adjust as appropraite.
+
+ There are some notes in it on excluding files you do not
+ wish to be backed up.
+
+5) Review the script
+ $notify_script
+ and check that it will email the right person when the store
+ becomes full. This is important -- when the store is full, no
+ more files will be backed up. You want to know about this.
+
+6) Start the backup daemon with the command
+ /usr/local/bin/bbackupd$daemon_args
+ in /etc/rc.local, or your local equivalent.
+ Note that bbackupd must run as root.
+__E
+# the extra cron step only applies in snapshot mode
+if($backup_mode eq 'snapshot')
+{
+ print <<__E;
+
+7) Set up a cron job to run whenever you want a snapshot of the
+ file system to be taken. Run the command
+ /usr/local/bin/bbackupctl -q$ctl_daemon_args sync
+__E
+}
+print <<__E;
+
+===================================================================
+
+Remember to make a secure, offsite backup of your backup keys,
+as described in step 1 above. If you do not, you have no backups.
+
+__E
+
diff --git a/bin/bbackupd/bbackupd.cpp b/bin/bbackupd/bbackupd.cpp
new file mode 100755
index 00000000..ca843105
--- /dev/null
+++ b/bin/bbackupd/bbackupd.cpp
@@ -0,0 +1,26 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: bbackupd.cpp
+// Purpose: main file for backup daemon
+// Created: 2003/10/11
+//
+// --------------------------------------------------------------------------
+
+#include "Box.h"
+#include "BackupDaemon.h"
+#include "MainHelper.h"
+#include "BoxPortsAndFiles.h"
+
+#include "MemLeakFindOn.h"
+
+int main(int argc, const char *argv[])
+{
+ MAINHELPER_START
+
+ // Construct the daemon and hand control to the Daemon framework's Main(),
+ // using the default configuration file location.
+ BackupDaemon daemon;
+ return daemon.Main(BOX_FILE_BBACKUPD_DEFAULT_CONFIG, argc, argv);
+
+ MAINHELPER_END
+}
+