summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Wilson <chris+github@qwirx.com>2008-05-28 15:24:05 +0000
committerChris Wilson <chris+github@qwirx.com>2008-05-28 15:24:05 +0000
commit1aaa7a42e7bd65902a07f29881b68acb45255d24 (patch)
treefe8983cd01778c757dea772933ca6654c852ab3c
parent441373e3cf2b6c1f4f119c4a4b0d494a6645b5c2 (diff)
Track and log file deletions by name.
Split crypto init and file sync process into its own method, to reduce call depth and facilitate calling in process from tests. Differentiate between 3 uses of stat in BackupClientDirectoryRecord by renaming the structures. Use stat instead of lstat when checking the filesystem that's holding an entity, in case it's a symbolic link to a different filesystem.
-rw-r--r--bin/bbackupd/BackupClientContext.cpp6
-rw-r--r--bin/bbackupd/BackupClientContext.h11
-rw-r--r--bin/bbackupd/BackupClientDeleteList.cpp80
-rw-r--r--bin/bbackupd/BackupClientDeleteList.h34
-rw-r--r--bin/bbackupd/BackupClientDirectoryRecord.cpp380
-rw-r--r--bin/bbackupd/BackupClientDirectoryRecord.h58
-rw-r--r--bin/bbackupd/BackupDaemon.cpp665
-rw-r--r--bin/bbackupd/BackupDaemon.h48
8 files changed, 782 insertions, 500 deletions
diff --git a/bin/bbackupd/BackupClientContext.cpp b/bin/bbackupd/BackupClientContext.cpp
index 146f7cb8..47826211 100644
--- a/bin/bbackupd/BackupClientContext.cpp
+++ b/bin/bbackupd/BackupClientContext.cpp
@@ -48,7 +48,8 @@ BackupClientContext::BackupClientContext
int32_t AccountNumber,
bool ExtendedLogging,
bool ExtendedLogToFile,
- std::string ExtendedLogFile
+ std::string ExtendedLogFile,
+ ProgressNotifier& rProgressNotifier
)
: mrDaemon(rDaemon),
mrTLSContext(rTLSContext),
@@ -69,7 +70,8 @@ BackupClientContext::BackupClientContext
mpExcludeFiles(0),
mpExcludeDirs(0),
mKeepAliveTimer(0),
- mbIsManaged(false)
+ mbIsManaged(false),
+ mrProgressNotifier(rProgressNotifier)
{
}
diff --git a/bin/bbackupd/BackupClientContext.h b/bin/bbackupd/BackupClientContext.h
index 1504cc72..0e82cf24 100644
--- a/bin/bbackupd/BackupClientContext.h
+++ b/bin/bbackupd/BackupClientContext.h
@@ -12,6 +12,7 @@
#include "BoxTime.h"
#include "BackupClientDeleteList.h"
+#include "BackupClientDirectoryRecord.h"
#include "BackupStoreFile.h"
#include "ExcludeList.h"
#include "Timer.h"
@@ -45,7 +46,8 @@ public:
int32_t AccountNumber,
bool ExtendedLogging,
bool ExtendedLogToFile,
- std::string ExtendedLogFile
+ std::string ExtendedLogFile,
+ ProgressNotifier &rProgressNotifier
);
virtual ~BackupClientContext();
private:
@@ -70,6 +72,7 @@ public:
int64_t GetClientStoreMarker() const {return mClientStoreMarker;}
bool StorageLimitExceeded() {return mStorageLimitExceeded;}
+ void SetStorageLimitExceeded() {mStorageLimitExceeded = true;}
// --------------------------------------------------------------------------
//
@@ -198,6 +201,11 @@ public:
virtual int GetMaximumDiffingTime();
virtual bool IsManaged() { return mbIsManaged; }
+ ProgressNotifier& GetProgressNotifier() const
+ {
+ return mrProgressNotifier;
+ }
+
private:
BackupDaemon &mrDaemon;
TLSContext &mrTLSContext;
@@ -221,6 +229,7 @@ private:
bool mbIsManaged;
int mKeepAliveTime;
int mMaximumDiffingTime;
+ ProgressNotifier &mrProgressNotifier;
};
#endif // BACKUPCLIENTCONTEXT__H
diff --git a/bin/bbackupd/BackupClientDeleteList.cpp b/bin/bbackupd/BackupClientDeleteList.cpp
index f6d8e0dc..b9b5b53e 100644
--- a/bin/bbackupd/BackupClientDeleteList.cpp
+++ b/bin/bbackupd/BackupClientDeleteList.cpp
@@ -42,21 +42,38 @@ BackupClientDeleteList::~BackupClientDeleteList()
{
}
+BackupClientDeleteList::FileToDelete::FileToDelete(int64_t DirectoryID,
+ const BackupStoreFilename& rFilename,
+ const std::string& rLocalPath)
+: mDirectoryID(DirectoryID),
+ mFilename(rFilename),
+ mLocalPath(rLocalPath)
+{ }
+
+BackupClientDeleteList::DirToDelete::DirToDelete(int64_t ObjectID,
+ const std::string& rLocalPath)
+: mObjectID(ObjectID),
+ mLocalPath(rLocalPath)
+{ }
+
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDeleteList::AddDirectoryDelete(int64_t)
+// Name: BackupClientDeleteList::AddDirectoryDelete(int64_t,
+// const BackupStoreFilename&)
// Purpose: Add a directory to the list of directories to be deleted.
// Created: 10/11/03
//
// --------------------------------------------------------------------------
-void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID)
+void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID,
+ const std::string& rLocalPath)
{
// Only add the delete to the list if it's not in the "no delete" set
- if(mDirectoryNoDeleteList.find(ObjectID) == mDirectoryNoDeleteList.end())
+ if(mDirectoryNoDeleteList.find(ObjectID) ==
+ mDirectoryNoDeleteList.end())
{
// Not in the list, so should delete it
- mDirectoryList.push_back(ObjectID);
+ mDirectoryList.push_back(DirToDelete(ObjectID, rLocalPath));
}
}
@@ -64,18 +81,22 @@ void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID)
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDeleteList::AddFileDelete(int64_t, BackupStoreFilenameClear &)
+// Name: BackupClientDeleteList::AddFileDelete(int64_t,
+// const BackupStoreFilename &)
// Purpose:
// Created: 10/11/03
//
// --------------------------------------------------------------------------
-void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename, const std::string& rLocalPath)
{
// Try to find it in the no delete list
- std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator delEntry(mFileNoDeleteList.begin());
+ std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator
+ delEntry(mFileNoDeleteList.begin());
while(delEntry != mFileNoDeleteList.end())
{
- if((delEntry)->first == DirectoryID && (delEntry)->second == rFilename)
+ if((delEntry)->first == DirectoryID
+ && (delEntry)->second == rFilename)
{
// Found!
break;
@@ -86,7 +107,8 @@ void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID, const BackupStor
// Only add it to the delete list if it wasn't in the no delete list
if(delEntry == mFileNoDeleteList.end())
{
- mFileList.push_back(std::pair<int64_t, BackupStoreFilename>(DirectoryID, rFilename));
+ mFileList.push_back(FileToDelete(DirectoryID, rFilename,
+ rLocalPath));
}
}
@@ -113,18 +135,24 @@ void BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
BackupProtocolClient &connection(rContext.GetConnection());
// Do the deletes
- for(std::vector<int64_t>::iterator i(mDirectoryList.begin()); i != mDirectoryList.end(); ++i)
+ for(std::vector<DirToDelete>::iterator i(mDirectoryList.begin());
+ i != mDirectoryList.end(); ++i)
{
- connection.QueryDeleteDirectory(*i);
+ connection.QueryDeleteDirectory(i->mObjectID);
+ rContext.GetProgressNotifier().NotifyDirectoryDeleted(
+ i->mObjectID, i->mLocalPath);
}
// Clear the directory list
mDirectoryList.clear();
// Delete the files
- for(std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator i(mFileList.begin()); i != mFileList.end(); ++i)
+ for(std::vector<FileToDelete>::iterator i(mFileList.begin());
+ i != mFileList.end(); ++i)
{
- connection.QueryDeleteFile(i->first, i->second);
+ connection.QueryDeleteFile(i->mDirectoryID, i->mFilename);
+ rContext.GetProgressNotifier().NotifyFileDeleted(
+ i->mDirectoryID, i->mLocalPath);
}
}
@@ -140,7 +168,15 @@ void BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
{
// First of all, is it in the delete vector?
- std::vector<int64_t>::iterator delEntry(std::find(mDirectoryList.begin(), mDirectoryList.end(), ObjectID));
+ std::vector<DirToDelete>::iterator delEntry(mDirectoryList.begin());
+ for(; delEntry != mDirectoryList.end(); delEntry++)
+ {
+ if(delEntry->mObjectID == ObjectID)
+ {
+ // Found!
+ break;
+ }
+ }
if(delEntry != mDirectoryList.end())
{
// erase this entry
@@ -148,7 +184,8 @@ void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
}
else
{
- // Haven't been asked to delete it yet, put it in the no delete list
+ // Haven't been asked to delete it yet, put it in the
+ // no delete list
mDirectoryNoDeleteList.insert(ObjectID);
}
}
@@ -162,13 +199,15 @@ void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
// Created: 19/11/03
//
// --------------------------------------------------------------------------
-void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename)
{
// Find this in the delete list
- std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator delEntry(mFileList.begin());
+ std::vector<FileToDelete>::iterator delEntry(mFileList.begin());
while(delEntry != mFileList.end())
{
- if((delEntry)->first == DirectoryID && (delEntry)->second == rFilename)
+ if(delEntry->mDirectoryID == DirectoryID
+ && delEntry->mFilename == rFilename)
{
// Found!
break;
@@ -186,10 +225,5 @@ void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID, const BackupS
// Haven't been asked to delete it yet, put it in the no delete list
mFileNoDeleteList.push_back(std::pair<int64_t, BackupStoreFilename>(DirectoryID, rFilename));
}
-
}
-
-
-
-
diff --git a/bin/bbackupd/BackupClientDeleteList.h b/bin/bbackupd/BackupClientDeleteList.h
index 5940cf50..b0fbf51a 100644
--- a/bin/bbackupd/BackupClientDeleteList.h
+++ b/bin/bbackupd/BackupClientDeleteList.h
@@ -28,22 +28,46 @@ class BackupClientContext;
// --------------------------------------------------------------------------
class BackupClientDeleteList
{
+private:
+ class FileToDelete
+ {
+ public:
+ int64_t mDirectoryID;
+ BackupStoreFilename mFilename;
+ std::string mLocalPath;
+ FileToDelete(int64_t DirectoryID,
+ const BackupStoreFilename& rFilename,
+ const std::string& rLocalPath);
+ };
+
+ class DirToDelete
+ {
+ public:
+ int64_t mObjectID;
+ std::string mLocalPath;
+ DirToDelete(int64_t ObjectID, const std::string& rLocalPath);
+ };
+
public:
BackupClientDeleteList();
~BackupClientDeleteList();
- void AddDirectoryDelete(int64_t ObjectID);
- void AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename);
+ void AddDirectoryDelete(int64_t ObjectID,
+ const std::string& rLocalPath);
+ void AddFileDelete(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename,
+ const std::string& rLocalPath);
void StopDirectoryDeletion(int64_t ObjectID);
- void StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename);
+ void StopFileDeletion(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename);
void PerformDeletions(BackupClientContext &rContext);
private:
- std::vector<int64_t> mDirectoryList;
+ std::vector<DirToDelete> mDirectoryList;
std::set<int64_t> mDirectoryNoDeleteList; // note: things only get in this list if they're not present in mDirectoryList when they are 'added'
- std::vector<std::pair<int64_t, BackupStoreFilename> > mFileList;
+ std::vector<FileToDelete> mFileList;
std::vector<std::pair<int64_t, BackupStoreFilename> > mFileNoDeleteList;
};
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.cpp b/bin/bbackupd/BackupClientDirectoryRecord.cpp
index b8d93493..5734d0d7 100644
--- a/bin/bbackupd/BackupClientDirectoryRecord.cpp
+++ b/bin/bbackupd/BackupClientDirectoryRecord.cpp
@@ -2,7 +2,8 @@
//
// File
// Name: BackupClientDirectoryRecord.cpp
-// Purpose: Implementation of record about directory for backup client
+// Purpose: Implementation of record about directory for
+// backup client
// Created: 2003/10/08
//
// --------------------------------------------------------------------------
@@ -100,14 +101,25 @@ void BackupClientDirectoryRecord::DeleteSubDirectories()
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &, int64_t, const std::string &, bool)
-// Purpose: Syncronise, recusively, a local directory with the server.
+// Name: BackupClientDirectoryRecord::SyncDirectory(
+// BackupClientDirectoryRecord::SyncParams &,
+// int64_t, const std::string &,
+// const std::string &, bool)
+// Purpose: Recursively synchronise a local directory
+// with the server.
// Created: 2003/10/08
//
// --------------------------------------------------------------------------
-void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &rParams, int64_t ContainingDirectoryID,
- const std::string &rLocalPath, bool ThisDirHasJustBeenCreated)
+void BackupClientDirectoryRecord::SyncDirectory(
+ BackupClientDirectoryRecord::SyncParams &rParams,
+ int64_t ContainingDirectoryID,
+ const std::string &rLocalPath,
+ const std::string &rRemotePath,
+ bool ThisDirHasJustBeenCreated)
{
+ BackupClientContext& rContext(rParams.mrContext);
+ ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
+
// Signal received by daemon?
if(rParams.mrDaemon.StopRun())
{
@@ -118,49 +130,66 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Start by making some flag changes, marking this sync as not done,
// and on the immediate sub directories.
mSyncDone = false;
- for(std::map<std::string, BackupClientDirectoryRecord *>::iterator i = mSubDirectories.begin();
+ for(std::map<std::string, BackupClientDirectoryRecord *>::iterator
+ i = mSubDirectories.begin();
i != mSubDirectories.end(); ++i)
{
i->second->mSyncDone = false;
}
- // Work out the time in the future after which the file should be uploaded regardless.
- // This is a simple way to avoid having too many problems with file servers when they have
- // clients with badly out of sync clocks.
- rParams.mUploadAfterThisTimeInTheFuture = GetCurrentBoxTime() + rParams.mMaxFileTimeInFuture;
+ // Work out the time in the future after which the file should
+ // be uploaded regardless. This is a simple way to avoid having
+ // too many problems with file servers when they have clients
+ // with badly out of sync clocks.
+ rParams.mUploadAfterThisTimeInTheFuture = GetCurrentBoxTime() +
+ rParams.mMaxFileTimeInFuture;
- // Build the current state checksum to compare against while getting info from dirs
- // Note checksum is used locally only, so byte order isn't considered.
+ // Build the current state checksum to compare against while
+ // getting info from dirs. Note checksum is used locally only,
+ // so byte order isn't considered.
MD5Digest currentStateChecksum;
+ struct stat dest_st;
// Stat the directory, to get attribute info
+ // If it's a symbolic link, we want the link target here
+ // (as we're about to back up the contents of the directory)
{
- struct stat st;
- if(::stat(rLocalPath.c_str(), &st) != 0)
+ if(::stat(rLocalPath.c_str(), &dest_st) != 0)
{
- // The directory has probably been deleted, so just ignore this error.
- // In a future scan, this deletion will be noticed, deleted from server, and this object deleted.
- rParams.GetProgressNotifier().NotifyDirStatFailed(
- this, rLocalPath, strerror(errno));
+ // The directory has probably been deleted, so
+ // just ignore this error. In a future scan, this
+ // deletion will be noticed, deleted from server,
+ // and this object deleted.
+ rNotifier.NotifyDirStatFailed(this, rLocalPath,
+ strerror(errno));
return;
}
- // Store inode number in map so directories are tracked in case they're renamed
+ // Store inode number in map so directories are tracked
+ // in case they're renamed
{
- BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
- idMap.AddToMap(st.st_ino, mObjectID, ContainingDirectoryID);
+ BackupClientInodeToIDMap &idMap(
+ rParams.mrContext.GetNewIDMap());
+ idMap.AddToMap(dest_st.st_ino, mObjectID,
+ ContainingDirectoryID);
}
// Add attributes to checksum
- currentStateChecksum.Add(&st.st_mode, sizeof(st.st_mode));
- currentStateChecksum.Add(&st.st_uid, sizeof(st.st_uid));
- currentStateChecksum.Add(&st.st_gid, sizeof(st.st_gid));
+ currentStateChecksum.Add(&dest_st.st_mode,
+ sizeof(dest_st.st_mode));
+ currentStateChecksum.Add(&dest_st.st_uid,
+ sizeof(dest_st.st_uid));
+ currentStateChecksum.Add(&dest_st.st_gid,
+ sizeof(dest_st.st_gid));
// Inode to be paranoid about things moving around
- currentStateChecksum.Add(&st.st_ino, sizeof(st.st_ino));
+ currentStateChecksum.Add(&dest_st.st_ino,
+ sizeof(dest_st.st_ino));
#ifdef HAVE_STRUCT_STAT_ST_FLAGS
- currentStateChecksum.Add(&st.st_flags, sizeof(st.st_flags));
+ currentStateChecksum.Add(&dest_st.st_flags,
+ sizeof(dest_st.st_flags));
#endif
StreamableMemBlock xattr;
- BackupClientFileAttributes::FillExtendedAttr(xattr, rLocalPath.c_str());
+ BackupClientFileAttributes::FillExtendedAttr(xattr,
+ rLocalPath.c_str());
currentStateChecksum.Add(xattr.GetBuffer(), xattr.GetSize());
}
@@ -170,13 +199,13 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
std::vector<std::string> files;
bool downloadDirectoryRecordBecauseOfFutureFiles = false;
- struct stat dir_st;
- if(::lstat(rLocalPath.c_str(), &dir_st) != 0)
+ struct stat link_st;
+ if(::lstat(rLocalPath.c_str(), &link_st) != 0)
{
// Report the error (logs and
// eventual email to administrator)
- rParams.GetProgressNotifier().NotifyFileStatFailed(this,
- rLocalPath, strerror(errno));
+ rNotifier.NotifyFileStatFailed(this, rLocalPath,
+ strerror(errno));
// FIXME move to NotifyFileStatFailed()
SetErrorWhenReadingFilesystemObject(rParams,
@@ -192,8 +221,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
DIR *dirHandle = 0;
try
{
- rParams.GetProgressNotifier().NotifyScanDirectory(
- this, rLocalPath);
+ rNotifier.NotifyScanDirectory(this, rLocalPath);
dirHandle = ::opendir(rLocalPath.c_str());
if(dirHandle == 0)
@@ -202,17 +230,19 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// eventual email to administrator)
if (errno == EACCES)
{
- rParams.GetProgressNotifier().NotifyDirListFailed(
- this, rLocalPath, "Access denied");
+ rNotifier.NotifyDirListFailed(this,
+ rLocalPath, "Access denied");
}
else
{
- rParams.GetProgressNotifier().NotifyDirListFailed(this,
+ rNotifier.NotifyDirListFailed(this,
rLocalPath, strerror(errno));
}
- // Report the error (logs and eventual email to administrator)
- SetErrorWhenReadingFilesystemObject(rParams, rLocalPath.c_str());
+ // Report the error (logs and eventual email
+ // to administrator)
+ SetErrorWhenReadingFilesystemObject(rParams,
+ rLocalPath.c_str());
// Ignore this directory for now.
return;
}
@@ -228,14 +258,17 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
::memset(&checksum_info, 0, sizeof(checksum_info));
struct dirent *en = 0;
- struct stat st;
+ struct stat file_st;
std::string filename;
while((en = ::readdir(dirHandle)) != 0)
{
rParams.mrContext.DoKeepAlive();
- // Don't need to use LinuxWorkaround_FinishDirentStruct(en, rLocalPath.c_str());
- // on Linux, as a stat is performed to get all this info
+ // Don't need to use
+ // LinuxWorkaround_FinishDirentStruct(en,
+ // rLocalPath.c_str());
+ // on Linux, as a stat is performed to
+ // get all this info
if(en->d_name[0] == '.' &&
(en->d_name[1] == '\0' || (en->d_name[1] == '.' && en->d_name[2] == '\0')))
@@ -259,11 +292,11 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// prefer S_IFREG, S_IFDIR...
int type = en->d_type;
#else
- if(::lstat(filename.c_str(), &st) != 0)
+ if(::lstat(filename.c_str(), &file_st) != 0)
{
// Report the error (logs and
// eventual email to administrator)
- rParams.GetProgressNotifier().NotifyFileStatFailed(this,
+ rNotifier.NotifyFileStatFailed(this,
filename, strerror(errno));
// FIXME move to NotifyFileStatFailed()
@@ -274,19 +307,18 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
continue;
}
- if(st.st_dev != dir_st.st_dev)
+ if(file_st.st_dev != dest_st.st_dev)
{
if(!(rParams.mrContext.ExcludeDir(
filename)))
{
- rParams.GetProgressNotifier()
- .NotifyMountPointSkipped(
- this, filename);
+ rNotifier.NotifyMountPointSkipped(
+ this, filename);
}
continue;
}
- int type = st.st_mode & S_IFMT;
+ int type = file_st.st_mode & S_IFMT;
#endif
if(type == S_IFREG || type == S_IFLNK)
@@ -296,8 +328,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Exclude it?
if(rParams.mrContext.ExcludeFile(filename))
{
- rParams.GetProgressNotifier()
- .NotifyFileExcluded(
+ rNotifier.NotifyFileExcluded(
this,
filename);
@@ -315,8 +346,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Exclude it?
if(rParams.mrContext.ExcludeDir(filename))
{
- rParams.GetProgressNotifier()
- .NotifyDirExcluded(
+ rNotifier.NotifyDirExcluded(
this,
filename);
@@ -331,15 +361,13 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
{
if(rParams.mrContext.ExcludeFile(filename))
{
- rParams.GetProgressNotifier()
- .NotifyFileExcluded(
+ rNotifier.NotifyFileExcluded(
this,
filename);
}
else
{
- rParams.GetProgressNotifier()
- .NotifyUnsupportedFileType(
+ rNotifier.NotifyUnsupportedFileType(
this, filename);
SetErrorWhenReadingFilesystemObject(
rParams, filename.c_str());
@@ -354,10 +382,9 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
#ifdef WIN32
// We didn't stat the file before,
// but now we need the information.
- if(::lstat(filename.c_str(), &st) != 0)
+ if(::lstat(filename.c_str(), &file_st) != 0)
{
- rParams.GetProgressNotifier()
- .NotifyFileStatFailed(this,
+ rNotifier.NotifyFileStatFailed(this,
filename,
strerror(errno));
@@ -370,18 +397,17 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
continue;
}
- if(st.st_dev != dir_st.st_dev)
+ if(file_st.st_dev != link_st.st_dev)
{
- rParams.GetProgressNotifier()
- .NotifyMountPointSkipped(this,
+ rNotifier.NotifyMountPointSkipped(this,
filename);
continue;
}
#endif
- checksum_info.mModificationTime = FileModificationTime(st);
- checksum_info.mAttributeModificationTime = FileAttrModificationTime(st);
- checksum_info.mSize = st.st_size;
+ checksum_info.mModificationTime = FileModificationTime(file_st);
+ checksum_info.mAttributeModificationTime = FileAttrModificationTime(file_st);
+ checksum_info.mSize = file_st.st_size;
currentStateChecksum.Add(&checksum_info, sizeof(checksum_info));
currentStateChecksum.Add(en->d_name, strlen(en->d_name));
@@ -394,7 +420,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Log that this has happened
if(!rParams.mHaveLoggedWarningAboutFutureFileTimes)
{
- rParams.GetProgressNotifier().NotifyFileModifiedInFuture(
+ rNotifier.NotifyFileModifiedInFuture(
this, filename);
rParams.mHaveLoggedWarningAboutFutureFileTimes = true;
}
@@ -468,7 +494,8 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
}
// Do the directory reading
- bool updateCompleteSuccess = UpdateItems(rParams, rLocalPath, pdirOnStore, entriesLeftOver, files, dirs);
+ bool updateCompleteSuccess = UpdateItems(rParams, rLocalPath,
+ rRemotePath, pdirOnStore, entriesLeftOver, files, dirs);
// LAST THING! (think exception safety)
// Store the new checksum -- don't fetch things unnecessarily in the future
@@ -604,11 +631,18 @@ void BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::
// Created: 2003/10/09
//
// --------------------------------------------------------------------------
-bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncParams &rParams,
- const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+bool BackupClientDirectoryRecord::UpdateItems(
+ BackupClientDirectoryRecord::SyncParams &rParams,
+ const std::string &rLocalPath,
+ const std::string &rRemotePath,
+ BackupStoreDirectory *pDirOnStore,
std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
- std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs)
+ std::vector<std::string> &rFiles,
+ const std::vector<std::string> &rDirs)
{
+ BackupClientContext& rContext(rParams.mrContext);
+ ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
+
bool allUpdatedSuccessfully = true;
// Decrypt all the directory entries.
@@ -634,7 +668,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
f != rFiles.end(); ++f)
{
// Send keep-alive message if needed
- rParams.mrContext.DoKeepAlive();
+ rContext.DoKeepAlive();
// Filename of this file
std::string filename(MakeFullPath(rLocalPath, *f));
@@ -651,7 +685,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
struct stat st;
if(::lstat(filename.c_str(), &st) != 0)
{
- rParams.GetProgressNotifier().NotifyFileStatFailed(this,
+ rNotifier.NotifyFileStatFailed(this,
filename, strerror(errno));
// Report the error (logs and
@@ -689,7 +723,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) == 0))
{
// Directory exists in the place of this file -- sort it out
- RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore, en->GetObjectID(), *f);
+ RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore,
+ en, *f);
en = 0;
}
@@ -701,7 +736,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// 2) It's not in the store
// Do we know about the inode number?
- const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
+ const BackupClientInodeToIDMap &idMap(rContext.GetCurrentIDMap());
int64_t renameObjectID = 0, renameInDirectory = 0;
if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
{
@@ -711,7 +746,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
bool isCurrentVersion = false;
box_time_t srvModTime = 0, srvAttributesHash = 0;
BackupStoreFilenameClear oldLeafname;
- if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion, &srvModTime, &srvAttributesHash, &oldLeafname))
+ if(rContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion, &srvModTime, &srvAttributesHash, &oldLeafname))
{
// Only interested if it's a file and the latest version
if(!isDir && isCurrentVersion)
@@ -724,11 +759,11 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// Therefore we can safely rename it to this new file.
// Get the connection to the server
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Only do this step if there is room on the server.
// This step will be repeated later when there is space available
- if(!rParams.mrContext.StorageLimitExceeded())
+ if(!rContext.StorageLimitExceeded())
{
// Rename the existing files (ie include old versions) on the server
connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
@@ -736,7 +771,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
storeFilename);
// Stop the attempt to delete the file in the original location
- BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ BackupClientDeleteList &rdelList(rContext.GetDeleteList());
rdelList.StopFileDeletion(renameInDirectory, oldLeafname);
// Create new entry in the directory for it
@@ -871,16 +906,22 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
<< rParams.mSyncPeriodEnd << ")");
}
+ bool fileSynced = true;
+
if (doUpload)
{
+ // Upload needed, don't mark sync success until
+ // we've actually done it
+ fileSynced = false;
+
// Make sure we're connected -- must connect here so we know whether
// the storage limit has been exceeded, and hence whether or not
// to actually upload the file.
- rParams.mrContext.GetConnection();
+ rContext.GetConnection();
// Only do this step if there is room on the server.
// This step will be repeated later when there is space available
- if(!rParams.mrContext.StorageLimitExceeded())
+ if(!rContext.StorageLimitExceeded())
{
// Upload the file to the server, recording the object ID it returns
bool noPreviousVersionOnServer = ((pDirOnStore != 0) && (en == 0));
@@ -890,15 +931,27 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
try
{
latestObjectID = UploadFile(rParams, filename, storeFilename, fileSize, modTime, attributesHash, noPreviousVersionOnServer);
- uploadSuccess = true;
+ if (latestObjectID == 0)
+ {
+ // storage limit exceeded
+ rParams.mrContext.SetStorageLimitExceeded();
+ uploadSuccess = false;
+ allUpdatedSuccessfully = false;
+ }
+ else
+ {
+ uploadSuccess = true;
+ }
}
catch(ConnectionException &e)
{
// Connection errors should just be passed on to the main handler, retries
// would probably just cause more problems.
- rParams.GetProgressNotifier()
- .NotifyFileUploadException(
- this, filename, e);
+ // StorageLimitExceeded never gets here.
+
+ rParams.mrDaemon.NotifySysadmin(BackupDaemon::NotifyEvent_StoreFull);
+ rNotifier.NotifyFileUploadException(
+ this, filename, e);
throw;
}
catch(BoxException &e)
@@ -907,14 +960,15 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
allUpdatedSuccessfully = false;
// Log it.
SetErrorWhenReadingFilesystemObject(rParams, filename.c_str());
- rParams.GetProgressNotifier()
- .NotifyFileUploadException(
- this, filename, e);
+ rNotifier.NotifyFileUploadException(
+ this, filename, e);
}
// Update structures if the file was uploaded successfully.
if(uploadSuccess)
{
+ fileSynced = true;
+
// delete from pending entries
if(pendingFirstSeenTime != 0 && mpPendingEntries != 0)
{
@@ -924,28 +978,31 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
else
{
- rParams.GetProgressNotifier().NotifyFileSkippedServerFull(this,
+ rNotifier.NotifyFileSkippedServerFull(this,
filename);
}
}
else if(en != 0 && en->GetAttributesHash() != attributesHash)
{
// Attributes have probably changed, upload them again.
- // If the attributes have changed enough, the directory hash will have changed too,
- // and so the dir will have been downloaded, and the entry will be available.
+ // If the attributes have changed enough, the directory
+ // hash will have changed too, and so the dir will have
+ // been downloaded, and the entry will be available.
// Get connection
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Only do this step if there is room on the server.
- // This step will be repeated later when there is space available
- if(!rParams.mrContext.StorageLimitExceeded())
+ // This step will be repeated later when there is
+ // space available
+ if(!rContext.StorageLimitExceeded())
{
// Update store
BackupClientFileAttributes attr;
attr.ReadAttributes(filename.c_str(), false /* put mod times in the attributes, please */);
MemBlockStream attrStream(attr);
connection.QuerySetReplacementFileAttributes(mObjectID, attributesHash, storeFilename, attrStream);
+ fileSynced = true;
}
}
@@ -981,7 +1038,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
if(fileSize >= rParams.mFileTrackingSizeThreshold)
{
// Get the map
- BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
+ BackupClientInodeToIDMap &idMap(rContext.GetNewIDMap());
// Need to get an ID from somewhere...
if(latestObjectID != 0)
@@ -993,7 +1050,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
{
// Don't know it -- haven't sent anything to the store, and didn't get a listing.
// Look it up in the current map, and if it's there, use that.
- const BackupClientInodeToIDMap &currentIDMap(rParams.mrContext.GetCurrentIDMap());
+ const BackupClientInodeToIDMap &currentIDMap(rContext.GetCurrentIDMap());
int64_t objid = 0, dirid = 0;
if(currentIDMap.Lookup(inodeNum, objid, dirid))
{
@@ -1002,15 +1059,18 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// NOTE: If the above assert fails, an inode number has been reused by the OS,
// or there is a problem somewhere. If this happened on a short test run, look
// into it. However, in a long running process this may happen occasionally and
- // not indiciate anything wrong.
+ // not indicate anything wrong.
// Run the release version for real life use, where this check is not made.
idMap.AddToMap(inodeNum, objid, mObjectID /* containing directory */);
}
}
}
- rParams.GetProgressNotifier().NotifyFileSynchronised(this,
- filename, fileSize);
+ if (fileSynced)
+ {
+ rNotifier.NotifyFileSynchronised(this, filename,
+ fileSize);
+ }
}
// Erase contents of files to save space when recursing
@@ -1030,7 +1090,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
d != rDirs.end(); ++d)
{
// Send keep-alive message if needed
- rParams.mrContext.DoKeepAlive();
+ rContext.DoKeepAlive();
// Get the local filename
std::string dirname(MakeFullPath(rLocalPath, *d));
@@ -1050,16 +1110,20 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// Check that the entry which might have been found is in fact a directory
if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == 0))
{
- // Entry exists, but is not a directory. Bad. Get rid of it.
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ // Entry exists, but is not a directory. Bad.
+ // Get rid of it.
+ BackupProtocolClient &connection(rContext.GetConnection());
connection.QueryDeleteFile(mObjectID /* in directory */, storeFilename);
+ rNotifier.NotifyFileDeleted(en->GetObjectID(),
+ storeFilename.GetClearFilename());
// Nothing found
en = 0;
}
- // Flag for having created directory, so can optimise the recusive call not to
- // read it again, because we know it's empty.
+ // Flag for having created directory, so can optimise the
+	// recursive call not to read it again, because we know
+ // it's empty.
bool haveJustCreatedDirOnServer = false;
// Next, see if it's in the list of sub directories
@@ -1086,7 +1150,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// No. Exists on the server, and we know about it from the listing.
subDirObjectID = en->GetObjectID();
}
- else if(rParams.mrContext.StorageLimitExceeded())
+ else if(rContext.StorageLimitExceeded())
// know we've got a connection if we get this far,
// as dir will have been modified.
{
@@ -1112,14 +1176,15 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// First, do we have a record in the ID map?
int64_t renameObjectID = 0, renameInDirectory = 0;
bool renameDir = false;
- const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
+ const BackupClientInodeToIDMap &idMap(
+ rContext.GetCurrentIDMap());
if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
{
// Look up on the server to get the name, to build the local filename
std::string localPotentialOldName;
bool isDir = false;
bool isCurrentVersion = false;
- if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion))
+ if(rContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion))
{
// Only interested if it's a directory
if(isDir && isCurrentVersion)
@@ -1137,7 +1202,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
// Get connection
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Don't do a check for storage limit exceeded here, because if we get to this
// stage, a connection will have been opened, and the status known, so the check
@@ -1157,7 +1222,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
connection.QueryChangeDirAttributes(renameObjectID, attrModTime, attrStream);
// Stop it being deleted later
- BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ BackupClientDeleteList &rdelList(
+ rContext.GetDeleteList());
rdelList.StopDirectoryDeletion(renameObjectID);
// This is the ID for the renamed directory
@@ -1194,12 +1260,14 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
}
- ASSERT(psubDirRecord != 0 || rParams.mrContext.StorageLimitExceeded());
+ ASSERT(psubDirRecord != 0 || rContext.StorageLimitExceeded());
if(psubDirRecord)
{
// Sync this sub directory too
- psubDirRecord->SyncDirectory(rParams, mObjectID, dirname, haveJustCreatedDirOnServer);
+ psubDirRecord->SyncDirectory(rParams, mObjectID,
+ dirname, rRemotePath + "/" + *d,
+ haveJustCreatedDirOnServer);
}
// Zero pointer in rEntriesLeftOver, if we have a pointer to zero
@@ -1228,20 +1296,26 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// to a list, which is actually deleted at the very end of the session.
// If there's an error during the process, it doesn't matter if things
// aren't actually deleted, as the whole state will be reset anyway.
- BackupClientDeleteList &rdel(rParams.mrContext.GetDeleteList());
+ BackupClientDeleteList &rdel(rContext.GetDeleteList());
+
+ std::string localName = MakeFullPath(rLocalPath,
+ en->GetName());
// Delete this entry -- file or directory?
if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
{
// Set a pending deletion for the file
- rdel.AddFileDelete(mObjectID, en->GetName());
+ rdel.AddFileDelete(mObjectID, en->GetName(),
+ localName);
}
else if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) != 0)
{
// Set as a pending deletion for the directory
- rdel.AddDirectoryDelete(en->GetObjectID());
+ rdel.AddDirectoryDelete(en->GetObjectID(),
+ localName);
- // If there's a directory record for it in the sub directory map, delete it now
+ // If there's a directory record for it in
+ // the sub directory map, delete it now
BackupStoreFilenameClear dirname(en->GetName());
std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(dirname.GetClearFilename()));
if(e != mSubDirectories.end())
@@ -1276,14 +1350,24 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// Created: 9/7/04
//
// --------------------------------------------------------------------------
-void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename)
+void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(
+ SyncParams &rParams,
+ BackupStoreDirectory* pDirOnStore,
+ BackupStoreDirectory::Entry* pEntry,
+ const std::string &rFilename)
{
// First, delete the directory
BackupProtocolClient &connection(rParams.mrContext.GetConnection());
- connection.QueryDeleteDirectory(ObjectID);
+ connection.QueryDeleteDirectory(pEntry->GetObjectID());
+
+ BackupStoreFilenameClear clear(pEntry->GetName());
+ rParams.mrContext.GetProgressNotifier().NotifyDirectoryDeleted(
+ pEntry->GetObjectID(), clear.GetClearFilename());
// Then, delete any directory record
- std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(rFilename));
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator
+ e(mSubDirectories.find(rFilename));
+
if(e != mSubDirectories.end())
{
// A record exists for this, remove it
@@ -1300,16 +1384,30 @@ void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &rPara
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &, const std::string &, const BackupStoreFilename &, int64_t, box_time_t, box_time_t, bool)
-// Purpose: Private. Upload a file to the server -- may send a patch instead of the whole thing
+// Name: BackupClientDirectoryRecord::UploadFile(
+// BackupClientDirectoryRecord::SyncParams &,
+// const std::string &,
+// const BackupStoreFilename &,
+// int64_t, box_time_t, box_time_t, bool)
+// Purpose: Private. Upload a file to the server. May send
+// a patch instead of the whole thing
// Created: 20/1/04
//
// --------------------------------------------------------------------------
-int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
- int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer)
+int64_t BackupClientDirectoryRecord::UploadFile(
+ BackupClientDirectoryRecord::SyncParams &rParams,
+ const std::string &rFilename,
+ const BackupStoreFilename &rStoreFilename,
+ int64_t FileSize,
+ box_time_t ModificationTime,
+ box_time_t AttributesHash,
+ bool NoPreviousVersionOnServer)
{
+ BackupClientContext& rContext(rParams.mrContext);
+ ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
+
// Get the connection
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Info
int64_t objID = 0;
@@ -1318,8 +1416,10 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
// Use a try block to catch store full errors
try
{
- // Might an old version be on the server, and is the file size over the diffing threshold?
- if(!NoPreviousVersionOnServer && FileSize >= rParams.mDiffingUploadSizeThreshold)
+ // Might an old version be on the server, and is the file
+ // size over the diffing threshold?
+ if(!NoPreviousVersionOnServer &&
+ FileSize >= rParams.mDiffingUploadSizeThreshold)
{
// YES -- try to do diff, if possible
// First, query the server to see if there's an old version available
@@ -1329,7 +1429,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
if(diffFromID != 0)
{
// Found an old version
- rParams.GetProgressNotifier().NotifyFileUploadingPatch(this,
+ rNotifier.NotifyFileUploadingPatch(this,
rFilename);
// Get the index
@@ -1339,7 +1439,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
// Diff the file
//
- rParams.mrContext.ManageDiffProcess();
+ rContext.ManageDiffProcess();
bool isCompletelyDifferent = false;
std::auto_ptr<IOStream> patchStream(
@@ -1348,11 +1448,11 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
mObjectID, /* containing directory */
rStoreFilename, diffFromID, *blockIndexStream,
connection.GetTimeout(),
- &rParams.mrContext, // DiffTimer implementation
+ &rContext, // DiffTimer implementation
0 /* not interested in the modification time */,
&isCompletelyDifferent));
- rParams.mrContext.UnManageDiffProcess();
+ rContext.UnManageDiffProcess();
//
// Upload the patch to the store
@@ -1360,6 +1460,9 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
std::auto_ptr<BackupProtocolClientSuccess> stored(connection.QueryStoreFile(mObjectID, ModificationTime,
AttributesHash, isCompletelyDifferent?(0):(diffFromID), rStoreFilename, *patchStream));
+ // Get object ID from the result
+ objID = stored->GetObjectID();
+
// Don't attempt to upload it again!
doNormalUpload = false;
}
@@ -1368,8 +1471,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
if(doNormalUpload)
{
// below threshold or nothing to diff from, so upload whole
- rParams.GetProgressNotifier().NotifyFileUploading(this,
- rFilename);
+ rNotifier.NotifyFileUploading(this, rFilename);
// Prepare to upload, getting a stream which will encode the file as we go along
std::auto_ptr<IOStream> upload(
@@ -1390,7 +1492,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
}
catch(BoxException &e)
{
- rParams.mrContext.UnManageDiffProcess();
+ rContext.UnManageDiffProcess();
if(e.GetType() == ConnectionException::ExceptionType && e.GetSubType() == ConnectionException::Protocol_UnexpectedReply)
{
@@ -1404,10 +1506,13 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
{
// The hard limit was exceeded on the server, notify!
rParams.mrDaemon.NotifySysadmin(BackupDaemon::NotifyEvent_StoreFull);
+ // return an error code instead of
+ // throwing an exception that we
+ // can't debug.
+ return 0;
}
- rParams.GetProgressNotifier()
- .NotifyFileUploadServerError(
- this, rFilename, type, subtype);
+ rNotifier.NotifyFileUploadServerError(this,
+ rFilename, type, subtype);
}
}
@@ -1415,7 +1520,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
throw;
}
- rParams.GetProgressNotifier().NotifyFileUploaded(this, rFilename, FileSize);
+ rNotifier.NotifyFileUploaded(this, rFilename, FileSize);
// Return the new object ID of this file
return objID;
@@ -1457,9 +1562,8 @@ void BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(BackupClie
//
// --------------------------------------------------------------------------
BackupClientDirectoryRecord::SyncParams::SyncParams(BackupDaemon &rDaemon,
- ProgressNotifier &rProgressNotifier, BackupClientContext &rContext)
- : mrProgressNotifier(rProgressNotifier),
- mSyncPeriodStart(0),
+ BackupClientContext &rContext)
+ : mSyncPeriodStart(0),
mSyncPeriodEnd(0),
mMaxUploadWait(0),
mMaxFileTimeInFuture(99999999999999999LL),
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.h b/bin/bbackupd/BackupClientDirectoryRecord.h
index 9e4dda7a..472005b6 100644
--- a/bin/bbackupd/BackupClientDirectoryRecord.h
+++ b/bin/bbackupd/BackupClientDirectoryRecord.h
@@ -96,6 +96,12 @@ class ProgressNotifier
const BackupClientDirectoryRecord* pDirRecord,
const std::string& rLocalPath,
int64_t FileSize) = 0;
+ virtual void NotifyDirectoryDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath) = 0;
+ virtual void NotifyFileDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath) = 0;
};
// --------------------------------------------------------------------------
@@ -137,14 +143,12 @@ public:
public:
SyncParams(
BackupDaemon &rDaemon,
- ProgressNotifier &rProgressNotifier,
BackupClientContext &rContext);
~SyncParams();
private:
// No copying
SyncParams(const SyncParams&);
SyncParams &operator=(const SyncParams&);
- ProgressNotifier &mrProgressNotifier;
public:
// Data members are public, as accessors are not justified here
@@ -161,42 +165,52 @@ public:
// Member variables modified by syncing process
box_time_t mUploadAfterThisTimeInTheFuture;
bool mHaveLoggedWarningAboutFutureFileTimes;
-
- ProgressNotifier& GetProgressNotifier() const
- {
- return mrProgressNotifier;
- }
};
- void SyncDirectory(SyncParams &rParams, int64_t ContainingDirectoryID, const std::string &rLocalPath,
+ void SyncDirectory(SyncParams &rParams,
+ int64_t ContainingDirectoryID,
+ const std::string &rLocalPath,
+ const std::string &rRemotePath,
bool ThisDirHasJustBeenCreated = false);
private:
void DeleteSubDirectories();
BackupStoreDirectory *FetchDirectoryListing(SyncParams &rParams);
- void UpdateAttributes(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, const std::string &rLocalPath);
- bool UpdateItems(SyncParams &rParams, const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+ void UpdateAttributes(SyncParams &rParams,
+ BackupStoreDirectory *pDirOnStore,
+ const std::string &rLocalPath);
+ bool UpdateItems(SyncParams &rParams, const std::string &rLocalPath,
+ const std::string &rRemotePath,
+ BackupStoreDirectory *pDirOnStore,
std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
- std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs);
- int64_t UploadFile(SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
- int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer);
- void SetErrorWhenReadingFilesystemObject(SyncParams &rParams, const char *Filename);
- void RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename);
+ std::vector<std::string> &rFiles,
+ const std::vector<std::string> &rDirs);
+ int64_t UploadFile(SyncParams &rParams,
+ const std::string &rFilename,
+ const BackupStoreFilename &rStoreFilename,
+ int64_t FileSize, box_time_t ModificationTime,
+ box_time_t AttributesHash, bool NoPreviousVersionOnServer);
+ void SetErrorWhenReadingFilesystemObject(SyncParams &rParams,
+ const char *Filename);
+ void RemoveDirectoryInPlaceOfFile(SyncParams &rParams,
+ BackupStoreDirectory* pDirOnStore,
+ BackupStoreDirectory::Entry* pEntry,
+ const std::string &rFilename);
private:
- int64_t mObjectID;
+ int64_t mObjectID;
std::string mSubDirName;
- bool mInitialSyncDone;
- bool mSyncDone;
+ bool mInitialSyncDone;
+ bool mSyncDone;
// Checksum of directory contents and attributes, used to detect changes
uint8_t mStateChecksum[MD5Digest::DigestLength];
- std::map<std::string, box_time_t> *mpPendingEntries;
- std::map<std::string, BackupClientDirectoryRecord *> mSubDirectories;
+ std::map<std::string, box_time_t> *mpPendingEntries;
+ std::map<std::string, BackupClientDirectoryRecord *> mSubDirectories;
// mpPendingEntries is a pointer rather than simple a member
- // variables, because most of the time it'll be empty. This would waste a lot
- // of memory because of STL allocation policies.
+ // variable, because most of the time it'll be empty. This would
+ // waste a lot of memory because of STL allocation policies.
};
#endif // BACKUPCLIENTDIRECTORYRECORD__H
diff --git a/bin/bbackupd/BackupDaemon.cpp b/bin/bbackupd/BackupDaemon.cpp
index a41c1cbc..8f68ff1a 100644
--- a/bin/bbackupd/BackupDaemon.cpp
+++ b/bin/bbackupd/BackupDaemon.cpp
@@ -47,7 +47,6 @@
#include "BoxPortsAndFiles.h"
#include "SSLLib.h"
-#include "TLSContext.h"
#include "BackupDaemon.h"
#include "BackupDaemonConfigVerify.h"
@@ -124,6 +123,10 @@ BackupDaemon::BackupDaemon()
: mState(BackupDaemon::State_Initialising),
mpCommandSocketInfo(0),
mDeleteUnusedRootDirEntriesAfter(0),
+ mClientStoreMarker(BackupClientContext::ClientStoreMarker_NotKnown),
+ mStorageLimitExceeded(false),
+ mReadErrorsOnFilesystemObjects(false),
+ mLastSyncTime(0),
mLogAllFileAccess(false)
#ifdef WIN32
, mInstallService(false),
@@ -294,7 +297,7 @@ void BackupDaemon::DeleteAllLocations()
// Clear the contents of the map, so it is empty
mLocations.clear();
- // And delete everything from the assoicated mount vector
+ // And delete everything from the associated mount vector
mIDMapMounts.clear();
}
@@ -685,70 +688,51 @@ void BackupDaemon::Run()
Timers::Cleanup();
}
-// --------------------------------------------------------------------------
-//
-// Function
-// Name: BackupDaemon::Run2()
-// Purpose: Run function for daemon (second stage)
-// Created: 2003/10/08
-//
-// --------------------------------------------------------------------------
-void BackupDaemon::Run2()
+void BackupDaemon::InitCrypto()
{
// Read in the certificates creating a TLS context
- TLSContext tlsContext;
const Configuration &conf(GetConfiguration());
std::string certFile(conf.GetKeyValue("CertificateFile"));
std::string keyFile(conf.GetKeyValue("PrivateKeyFile"));
std::string caFile(conf.GetKeyValue("TrustedCAsFile"));
- tlsContext.Initialise(false /* as client */, certFile.c_str(), keyFile.c_str(), caFile.c_str());
+ mTlsContext.Initialise(false /* as client */, certFile.c_str(),
+ keyFile.c_str(), caFile.c_str());
// Set up the keys for various things
BackupClientCryptoKeys_Setup(conf.GetKeyValue("KeysFile").c_str());
+}
- // Setup various timings
- int maximumDiffingTime = 600;
- int keepAliveTime = 60;
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::Run2()
+// Purpose: Run function for daemon (second stage)
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::Run2()
+{
+ InitCrypto();
- // max diffing time, keep-alive time
- if(conf.KeyExists("MaximumDiffingTime"))
- {
- maximumDiffingTime = conf.GetKeyValueInt("MaximumDiffingTime");
- }
- if(conf.KeyExists("KeepAliveTime"))
- {
- keepAliveTime = conf.GetKeyValueInt("KeepAliveTime");
- }
+ const Configuration &conf(GetConfiguration());
// How often to connect to the store (approximate)
- box_time_t updateStoreInterval = SecondsToBoxTime(conf.GetKeyValueInt("UpdateStoreInterval"));
+ box_time_t updateStoreInterval = SecondsToBoxTime(
+ conf.GetKeyValueInt("UpdateStoreInterval"));
// But are we connecting automatically?
bool automaticBackup = conf.GetKeyValueBool("AutomaticBackup");
- // The minimum age a file needs to be before it will be considered for uploading
- box_time_t minimumFileAge = SecondsToBoxTime(conf.GetKeyValueInt("MinimumFileAge"));
-
- // The maximum time we'll wait to upload a file, regardless of how often it's modified
- box_time_t maxUploadWait = SecondsToBoxTime(conf.GetKeyValueInt("MaxUploadWait"));
- // Adjust by subtracting the minimum file age, so is relative to sync period end in comparisons
- maxUploadWait = (maxUploadWait > minimumFileAge)?(maxUploadWait - minimumFileAge):(0);
-
// When the next sync should take place -- which is ASAP
box_time_t nextSyncTime = 0;
// When the last sync started (only updated if the store was not full when the sync ended)
- box_time_t lastSyncTime = 0;
+ mLastSyncTime = 0;
// --------------------------------------------------------------------------------------------
- // And what's the current client store marker?
- int64_t clientStoreMarker =
- BackupClientContext::ClientStoreMarker_NotKnown;
- // haven't contacted the store yet
-
bool deleteStoreObjectInfoFile = DeserializeStoreObjectInfo(
- clientStoreMarker, lastSyncTime, nextSyncTime);
+ mLastSyncTime, nextSyncTime);
// --------------------------------------------------------------------------------------------
@@ -756,6 +740,8 @@ void BackupDaemon::Run2()
// Set state
SetState(State_Idle);
+ bool doSyncForcedByPreviousSyncError = false;
+
// Loop around doing backups
do
{
@@ -765,70 +751,75 @@ void BackupDaemon::Run2()
bool doSyncForcedByCommand = false;
// Is a delay necessary?
+ box_time_t currentTime;
+
+ do
{
- box_time_t currentTime;
- do
+ // Check whether we should be stopping,
+ // and don't run a sync if so.
+ if(StopRun()) break;
+
+ currentTime = GetCurrentBoxTime();
+
+ // Pause a while, but no more than
+ // MAX_SLEEP_TIME seconds (use the conditional
+ // because times are unsigned)
+ box_time_t requiredDelay =
+ (nextSyncTime < currentTime)
+ ? (0)
+ : (nextSyncTime - currentTime);
+
+ // If there isn't automatic backup happening,
+ // set a long delay. And limit delays at the
+ // same time.
+ if(!automaticBackup && !doSyncForcedByPreviousSyncError)
{
- // Check whether we should be stopping,
- // and don't run a sync if so.
- if(StopRun()) break;
-
- currentTime = GetCurrentBoxTime();
-
- // Pause a while, but no more than
- // MAX_SLEEP_TIME seconds (use the conditional
- // because times are unsigned)
- box_time_t requiredDelay =
- (nextSyncTime < currentTime)
- ? (0)
- : (nextSyncTime - currentTime);
-
- // If there isn't automatic backup happening,
- // set a long delay. And limit delays at the
- // same time.
- if(!automaticBackup || requiredDelay >
- SecondsToBoxTime(MAX_SLEEP_TIME))
+ requiredDelay = SecondsToBoxTime(MAX_SLEEP_TIME);
+ }
+ else if(requiredDelay > SecondsToBoxTime(MAX_SLEEP_TIME))
+ {
+ requiredDelay = SecondsToBoxTime(MAX_SLEEP_TIME);
+ }
+
+ // Only delay if necessary
+ if(requiredDelay > 0)
+ {
+ // Sleep somehow. There are choices
+ // on how this should be done,
+ // depending on the state of the
+ // control connection
+ if(mpCommandSocketInfo != 0)
{
- requiredDelay = SecondsToBoxTime(
- MAX_SLEEP_TIME);
+ // A command socket exists,
+ // so sleep by waiting on it
+ WaitOnCommandSocket(requiredDelay,
+ doSync, doSyncForcedByCommand);
}
-
- // Only delay if necessary
- if(requiredDelay > 0)
+ else
{
- // Sleep somehow. There are choices
- // on how this should be done,
- // depending on the state of the
- // control connection
- if(mpCommandSocketInfo != 0)
- {
- // A command socket exists,
- // so sleep by waiting on it
- WaitOnCommandSocket(
- requiredDelay, doSync,
- doSyncForcedByCommand);
- }
- else
- {
- // No command socket or
- // connection, just do a
- // normal sleep
- time_t sleepSeconds =
- BoxTimeToSeconds(
- requiredDelay);
- ::sleep((sleepSeconds <= 0)
- ? 1
- : sleepSeconds);
- }
+ // No command socket or
+ // connection, just do a
+ // normal sleep
+ time_t sleepSeconds =
+ BoxTimeToSeconds(requiredDelay);
+ ::sleep((sleepSeconds <= 0)
+ ? 1 : sleepSeconds);
}
-
- } while((!automaticBackup || (currentTime < nextSyncTime)) && !doSync && !StopRun());
+ }
+
+ if ((automaticBackup || doSyncForcedByPreviousSyncError)
+ && currentTime >= nextSyncTime)
+ {
+ doSync = true;
+ }
}
+ while(!doSync && !StopRun());
// Time of sync start, and if it's time for another sync
// (and we're doing automatic syncs), set the flag
box_time_t currentSyncStartTime = GetCurrentBoxTime();
- if(automaticBackup && currentSyncStartTime >= nextSyncTime)
+ if((automaticBackup || doSyncForcedByPreviousSyncError) &&
+ currentSyncStartTime >= nextSyncTime)
{
doSync = true;
}
@@ -859,55 +850,6 @@ void BackupDaemon::Run2()
// Reset statistics on uploads
BackupStoreFile::ResetStats();
- // Calculate the sync period of files to examine
- box_time_t syncPeriodStart = lastSyncTime;
- box_time_t syncPeriodEnd = currentSyncStartTime -
- minimumFileAge;
-
- if(syncPeriodStart >= syncPeriodEnd &&
- syncPeriodStart - syncPeriodEnd < minimumFileAge)
- {
- // This can happen if we receive a force-sync
- // command less than minimumFileAge after
- // the last sync. Deal with it by moving back
- // syncPeriodStart, which should not do any
- // damage.
- syncPeriodStart = syncPeriodEnd -
- SecondsToBoxTime(1);
- }
-
- if(syncPeriodStart >= syncPeriodEnd)
- {
- BOX_ERROR("Invalid (negative) sync period: "
- "perhaps your clock is going "
- "backwards (" << syncPeriodStart <<
- " to " << syncPeriodEnd << ")");
- THROW_EXCEPTION(ClientException,
- ClockWentBackwards);
- }
-
- // Check logic
- ASSERT(syncPeriodEnd > syncPeriodStart);
- // Paranoid check on sync times
- if(syncPeriodStart >= syncPeriodEnd) continue;
-
- // Adjust syncPeriodEnd to emulate snapshot
- // behaviour properly
- box_time_t syncPeriodEndExtended = syncPeriodEnd;
- // Using zero min file age?
- if(minimumFileAge == 0)
- {
- // Add a year on to the end of the end time,
- // to make sure we sync files which are
- // modified after the scan run started.
- // Of course, they may be eligible to be
- // synced again the next time round,
- // but this should be OK, because the changes
- // only upload should upload no data.
- syncPeriodEndExtended += SecondsToBoxTime(
- (time_t)(356*24*3600));
- }
-
// Delete the serialised store object file,
// so that we don't try to reload it after a
// partially completed backup
@@ -933,115 +875,13 @@ void BackupDaemon::Run2()
try
{
- // Set state and log start
- SetState(State_Connected);
- BOX_NOTICE("Beginning scan of local files");
-
- std::string extendedLogFile;
- if (conf.KeyExists("ExtendedLogFile"))
- {
- extendedLogFile = conf.GetKeyValue(
- "ExtendedLogFile");
- }
-
- if (conf.KeyExists("LogAllFileAccess"))
- {
- mLogAllFileAccess =
- conf.GetKeyValueBool(
- "LogAllFileAccess");
- }
-
- // Then create a client context object (don't
- // just connect, as this may be unnecessary)
- BackupClientContext clientContext
- (
- *this,
- tlsContext,
- conf.GetKeyValue("StoreHostname"),
- conf.GetKeyValueInt("StorePort"),
- conf.GetKeyValueInt("AccountNumber"),
- conf.GetKeyValueBool("ExtendedLogging"),
- conf.KeyExists("ExtendedLogFile"),
- extendedLogFile
- );
-
- // Set up the sync parameters
- BackupClientDirectoryRecord::SyncParams params(
- *this, *this, clientContext);
- params.mSyncPeriodStart = syncPeriodStart;
- params.mSyncPeriodEnd = syncPeriodEndExtended;
- // use potentially extended end time
- params.mMaxUploadWait = maxUploadWait;
- params.mFileTrackingSizeThreshold =
- conf.GetKeyValueInt(
- "FileTrackingSizeThreshold");
- params.mDiffingUploadSizeThreshold =
- conf.GetKeyValueInt(
- "DiffingUploadSizeThreshold");
- params.mMaxFileTimeInFuture =
- SecondsToBoxTime(
- conf.GetKeyValueInt(
- "MaxFileTimeInFuture"));
- mDeleteRedundantLocationsAfter =
- conf.GetKeyValueInt(
- "DeleteRedundantLocationsAfter");
-
- clientContext.SetMaximumDiffingTime(maximumDiffingTime);
- clientContext.SetKeepAliveTime(keepAliveTime);
-
- // Set store marker
- clientContext.SetClientStoreMarker(clientStoreMarker);
-
- // Set up the locations, if necessary --
- // need to do it here so we have a
- // (potential) connection to use
- if(mLocations.empty())
- {
- const Configuration &locations(
- conf.GetSubConfiguration(
- "BackupLocations"));
-
- // Make sure all the directory records
- // are set up
- SetupLocations(clientContext, locations);
- }
-
- // Get some ID maps going
- SetupIDMapsForSync();
-
- // Delete any unused directories?
- DeleteUnusedRootDirEntries(clientContext);
-
// Notify administrator
NotifySysadmin(NotifyEvent_BackupStart);
- // Go through the records, syncing them
- for(std::vector<Location *>::const_iterator
- i(mLocations.begin());
- i != mLocations.end(); ++i)
- {
- // Set current and new ID map pointers
- // in the context
- clientContext.SetIDMaps(mCurrentIDMaps[(*i)->mIDMapIndex], mNewIDMaps[(*i)->mIDMapIndex]);
-
- // Set exclude lists (context doesn't
- // take ownership)
- clientContext.SetExcludeLists(
- (*i)->mpExcludeFiles,
- (*i)->mpExcludeDirs);
-
- // Sync the directory
- (*i)->mpDirectoryRecord->SyncDirectory(
- params,
- BackupProtocolClientListDirectory::RootDirectory,
- (*i)->mPath);
+ RunSyncNow();
- // Unset exclude lists (just in case)
- clientContext.SetExcludeLists(0, 0);
- }
-
// Errors reading any files?
- if(params.mReadErrorsOnFilesystemObjects)
+ if(mReadErrorsOnFilesystemObjects)
{
// Notify administrator
NotifySysadmin(NotifyEvent_ReadError);
@@ -1053,33 +893,14 @@ void BackupDaemon::Run2()
mNotificationsSent[NotifyEvent_ReadError] = false;
}
- // Perform any deletions required -- these are
- // delayed until the end to allow renaming to
- // happen neatly.
- clientContext.PerformDeletions();
-
- // Close any open connection
- clientContext.CloseAnyOpenConnection();
-
- // Get the new store marker
- clientStoreMarker = clientContext.GetClientStoreMarker();
-
// Check the storage limit
- if(clientContext.StorageLimitExceeded())
+ if(mStorageLimitExceeded)
{
// Tell the sysadmin about this
NotifySysadmin(NotifyEvent_StoreFull);
}
else
{
- // The start time of the next run is
- // the end time of this run.
- // This is only done if the storage
- // limit wasn't exceeded (as things
- // won't have been done properly if
- // it was)
- lastSyncTime = syncPeriodEnd;
-
// unflag the storage full notify flag
// so that next time the store is full,
// an alert will be sent
@@ -1092,12 +913,6 @@ void BackupDaemon::Run2()
Random::RandomInt(updateStoreInterval >>
SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
- // Commit the ID Maps
- CommitIDMapsAfterSync();
-
- // Log
- BOX_NOTICE("Finished scan of local files");
-
// Notify administrator
NotifySysadmin(NotifyEvent_BackupFinish);
@@ -1108,11 +923,14 @@ void BackupDaemon::Run2()
// delete the file next time we start a backup
deleteStoreObjectInfoFile =
- SerializeStoreObjectInfo(
- clientStoreMarker,
- lastSyncTime, nextSyncTime);
+ SerializeStoreObjectInfo(mLastSyncTime,
+ nextSyncTime);
// --------------------------------------------------------------------------------------------
+
+ // If we were retrying after an error,
+ // now would be a good time to stop :-)
+ doSyncForcedByPreviousSyncError = false;
}
catch(BoxException &e)
{
@@ -1153,9 +971,9 @@ void BackupDaemon::Run2()
}
// Clear state data
- syncPeriodStart = 0;
- // go back to beginning of time
- clientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown; // no store marker, so download everything
+ // Go back to beginning of time
+ mLastSyncTime = 0;
+ mClientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown; // no store marker, so download everything
DeleteAllLocations();
DeleteAllIDMaps();
@@ -1168,7 +986,8 @@ void BackupDaemon::Run2()
return;
}
- // If the Berkely db files get corrupted, delete them and try again immediately
+ // If the Berkeley db files get corrupted,
+ // delete them and try again immediately.
if(isBerkelyDbFailure)
{
BOX_ERROR("Berkely db inode map files corrupted, deleting and restarting scan. Renamed files and directories will not be tracked until after this scan.");
@@ -1188,10 +1007,11 @@ void BackupDaemon::Run2()
"waiting to retry...");
::sleep(10);
nextSyncTime = currentSyncStartTime +
- SecondsToBoxTime(90) +
+ SecondsToBoxTime(100) +
Random::RandomInt(
updateStoreInterval >>
SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
+ doSyncForcedByPreviousSyncError = true;
}
}
@@ -1221,6 +1041,239 @@ void BackupDaemon::Run2()
DeleteAllIDMaps();
}
+void BackupDaemon::RunSyncNow()
+{
+ // Set state and log start
+ SetState(State_Connected);
+ BOX_NOTICE("Beginning scan of local files");
+
+ const Configuration &conf(GetConfiguration());
+
+ std::string extendedLogFile;
+ if (conf.KeyExists("ExtendedLogFile"))
+ {
+ extendedLogFile = conf.GetKeyValue("ExtendedLogFile");
+ }
+
+ if (conf.KeyExists("LogAllFileAccess"))
+ {
+ mLogAllFileAccess = conf.GetKeyValueBool("LogAllFileAccess");
+ }
+
+ // Then create a client context object (don't
+ // just connect, as this may be unnecessary)
+ BackupClientContext clientContext
+ (
+ *this,
+ mTlsContext,
+ conf.GetKeyValue("StoreHostname"),
+ conf.GetKeyValueInt("StorePort"),
+ conf.GetKeyValueInt("AccountNumber"),
+ conf.GetKeyValueBool("ExtendedLogging"),
+ conf.KeyExists("ExtendedLogFile"),
+ extendedLogFile, *this
+ );
+
+ // The minimum age a file needs to be before it will be
+ // considered for uploading
+ box_time_t minimumFileAge = SecondsToBoxTime(
+ conf.GetKeyValueInt("MinimumFileAge"));
+
+ // The maximum time we'll wait to upload a file, regardless
+ // of how often it's modified
+ box_time_t maxUploadWait = SecondsToBoxTime(
+ conf.GetKeyValueInt("MaxUploadWait"));
+ // Adjust by subtracting the minimum file age, so is relative
+ // to sync period end in comparisons
+ if (maxUploadWait > minimumFileAge)
+ {
+ maxUploadWait -= minimumFileAge;
+ }
+ else
+ {
+ maxUploadWait = 0;
+ }
+
+ // Calculate the sync period of files to examine
+ box_time_t syncPeriodStart = mLastSyncTime;
+ box_time_t syncPeriodEnd = GetCurrentBoxTime() - minimumFileAge;
+
+ if(syncPeriodStart >= syncPeriodEnd &&
+ syncPeriodStart - syncPeriodEnd < minimumFileAge)
+ {
+ // This can happen if we receive a force-sync
+ // command less than minimumFileAge after
+ // the last sync. Deal with it by moving back
+ // syncPeriodStart, which should not do any
+ // damage.
+ syncPeriodStart = syncPeriodEnd -
+ SecondsToBoxTime(1);
+ }
+
+ if(syncPeriodStart >= syncPeriodEnd)
+ {
+ BOX_ERROR("Invalid (negative) sync period: "
+ "perhaps your clock is going "
+ "backwards (" << syncPeriodStart <<
+ " to " << syncPeriodEnd << ")");
+ THROW_EXCEPTION(ClientException,
+ ClockWentBackwards);
+ }
+
+ // Check logic
+ ASSERT(syncPeriodEnd > syncPeriodStart);
+ // Paranoid check on sync times
+ if(syncPeriodStart >= syncPeriodEnd) return;
+
+ // Adjust syncPeriodEnd to emulate snapshot
+ // behaviour properly
+ box_time_t syncPeriodEndExtended = syncPeriodEnd;
+
+ // Using zero min file age?
+ if(minimumFileAge == 0)
+ {
+ // Add a year on to the end of the end time,
+ // to make sure we sync files which are
+ // modified after the scan run started.
+ // Of course, they may be eligible to be
+ // synced again the next time round,
+ // but this should be OK, because the changes-only
+ // upload should upload no data.
+ syncPeriodEndExtended += SecondsToBoxTime(
+ (time_t)(356*24*3600));
+ }
+
+ // Set up the sync parameters
+ BackupClientDirectoryRecord::SyncParams params(
+ *this, clientContext);
+ params.mSyncPeriodStart = syncPeriodStart;
+ params.mSyncPeriodEnd = syncPeriodEndExtended;
+ // use potentially extended end time
+ params.mMaxUploadWait = maxUploadWait;
+ params.mFileTrackingSizeThreshold =
+ conf.GetKeyValueInt(
+ "FileTrackingSizeThreshold");
+ params.mDiffingUploadSizeThreshold =
+ conf.GetKeyValueInt(
+ "DiffingUploadSizeThreshold");
+ params.mMaxFileTimeInFuture =
+ SecondsToBoxTime(
+ conf.GetKeyValueInt(
+ "MaxFileTimeInFuture"));
+ mDeleteRedundantLocationsAfter =
+ conf.GetKeyValueInt(
+ "DeleteRedundantLocationsAfter");
+ mStorageLimitExceeded = false;
+ mReadErrorsOnFilesystemObjects = false;
+
+ // Setup various timings
+ int maximumDiffingTime = 600;
+ int keepAliveTime = 60;
+
+ // max diffing time, keep-alive time
+ if(conf.KeyExists("MaximumDiffingTime"))
+ {
+ maximumDiffingTime = conf.GetKeyValueInt("MaximumDiffingTime");
+ }
+ if(conf.KeyExists("KeepAliveTime"))
+ {
+ keepAliveTime = conf.GetKeyValueInt("KeepAliveTime");
+ }
+
+ clientContext.SetMaximumDiffingTime(maximumDiffingTime);
+ clientContext.SetKeepAliveTime(keepAliveTime);
+
+ // Set store marker
+ clientContext.SetClientStoreMarker(mClientStoreMarker);
+
+ // Set up the locations, if necessary --
+ // need to do it here so we have a
+ // (potential) connection to use
+ if(mLocations.empty())
+ {
+ const Configuration &locations(
+ conf.GetSubConfiguration(
+ "BackupLocations"));
+
+ // Make sure all the directory records
+ // are set up
+ SetupLocations(clientContext, locations);
+ }
+
+ // Get some ID maps going
+ SetupIDMapsForSync();
+
+ // Delete any unused directories?
+ DeleteUnusedRootDirEntries(clientContext);
+
+ // Go through the records, syncing them
+ for(std::vector<Location *>::const_iterator
+ i(mLocations.begin());
+ i != mLocations.end(); ++i)
+ {
+ // Set current and new ID map pointers
+ // in the context
+ clientContext.SetIDMaps(mCurrentIDMaps[(*i)->mIDMapIndex],
+ mNewIDMaps[(*i)->mIDMapIndex]);
+
+ // Set exclude lists (context doesn't
+ // take ownership)
+ clientContext.SetExcludeLists(
+ (*i)->mpExcludeFiles,
+ (*i)->mpExcludeDirs);
+
+ // Sync the directory
+ (*i)->mpDirectoryRecord->SyncDirectory(
+ params,
+ BackupProtocolClientListDirectory::RootDirectory,
+ (*i)->mPath, std::string("/") + (*i)->mName);
+
+ // Unset exclude lists (just in case)
+ clientContext.SetExcludeLists(0, 0);
+ }
+
+ // Errors reading any files?
+ if(params.mReadErrorsOnFilesystemObjects)
+ {
+ // Notify administrator
+ NotifySysadmin(NotifyEvent_ReadError);
+ }
+ else
+ {
+ // Unset the read error flag, so the
+ // error is reported again if it
+ // happens again
+ mNotificationsSent[NotifyEvent_ReadError] = false;
+ }
+
+ // Perform any deletions required -- these are
+ // delayed until the end to allow renaming to
+ // happen neatly.
+ clientContext.PerformDeletions();
+
+ // Close any open connection
+ clientContext.CloseAnyOpenConnection();
+
+ // Get the new store marker
+ mClientStoreMarker = clientContext.GetClientStoreMarker();
+ mStorageLimitExceeded = clientContext.StorageLimitExceeded();
+ mReadErrorsOnFilesystemObjects =
+ params.mReadErrorsOnFilesystemObjects;
+
+ if(!mStorageLimitExceeded)
+ {
+ // The start time of the next run is the end time of this
+ // run. This is only done if the storage limit wasn't
+ // exceeded (as things won't have been done properly if
+ // it was)
+ mLastSyncTime = syncPeriodEnd;
+ }
+
+ // Commit the ID Maps
+ CommitIDMapsAfterSync();
+
+ // Log
+ BOX_NOTICE("Finished scan of local files");
+}
// --------------------------------------------------------------------------
//
@@ -2460,13 +2513,13 @@ void BackupDaemon::DeleteUnusedRootDirEntries(BackupClientContext &rContext)
// Entries to delete, and it's the right time to do so...
BOX_NOTICE("Deleting unused locations from store root...");
BackupProtocolClient &connection(rContext.GetConnection());
- for(std::vector<std::pair<int64_t,std::string> >::iterator i(mUnusedRootDirEntries.begin()); i != mUnusedRootDirEntries.end(); ++i)
+ for(std::vector<std::pair<int64_t,std::string> >::iterator
+ i(mUnusedRootDirEntries.begin());
+ i != mUnusedRootDirEntries.end(); ++i)
{
connection.QueryDeleteDirectory(i->first);
-
- // Log this
- BOX_NOTICE("Deleted " << i->second << " (ID " << i->first
- << ") from store root");
+ rContext.GetProgressNotifier().NotifyFileDeleted(
+ i->first, i->second);
}
// Reset state
@@ -2737,9 +2790,12 @@ BackupDaemon::CommandSocketInfo::~CommandSocketInfo()
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time_t theLastSyncTime, box_time_t theNextSyncTime)
-// Purpose: Serializes remote directory and file information into a stream of bytes, using an Archive abstraction.
-//
+// Name: BackupDaemon::SerializeStoreObjectInfo(
+// box_time_t theLastSyncTime,
+// box_time_t theNextSyncTime)
+// Purpose: Serializes remote directory and file information
+// into a stream of bytes, using an Archive
+// abstraction.
// Created: 2005/04/11
//
// --------------------------------------------------------------------------
@@ -2748,7 +2804,8 @@ static const int STOREOBJECTINFO_MAGIC_ID_VALUE = 0x7777525F;
static const std::string STOREOBJECTINFO_MAGIC_ID_STRING = "BBACKUPD-STATE";
static const int STOREOBJECTINFO_VERSION = 2;
-bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time_t theLastSyncTime, box_time_t theNextSyncTime) const
+bool BackupDaemon::SerializeStoreObjectInfo(box_time_t theLastSyncTime,
+ box_time_t theNextSyncTime) const
{
if(!GetConfiguration().KeyExists("StoreObjectInfoFile"))
{
@@ -2777,7 +2834,7 @@ bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
anArchive.Write(STOREOBJECTINFO_MAGIC_ID_STRING);
anArchive.Write(STOREOBJECTINFO_VERSION);
anArchive.Write(GetLoadedConfigModifiedTime());
- anArchive.Write(aClientStoreMarker);
+ anArchive.Write(mClientStoreMarker);
anArchive.Write(theLastSyncTime);
anArchive.Write(theNextSyncTime);
@@ -2829,15 +2886,13 @@ bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
}
catch(std::exception &e)
{
- BOX_ERROR("Internal error writing store object "
- "info file (" << StoreObjectInfoFile << "): "
- << e.what());
+ BOX_ERROR("Failed to write StoreObjectInfoFile: " <<
+ StoreObjectInfoFile << ": " << e.what());
}
catch(...)
{
- BOX_ERROR("Internal error writing store object "
- "info file (" << StoreObjectInfoFile << "): "
- "unknown error");
+ BOX_ERROR("Failed to write StoreObjectInfoFile: " <<
+ StoreObjectInfoFile << ": unknown error");
}
return created;
@@ -2846,13 +2901,17 @@ bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_time_t & theLastSyncTime, box_time_t & theNextSyncTime)
-// Purpose: Deserializes remote directory and file information from a stream of bytes, using an Archive abstraction.
-//
+// Name: BackupDaemon::DeserializeStoreObjectInfo(
+// box_time_t & theLastSyncTime,
+// box_time_t & theNextSyncTime)
+// Purpose: Deserializes remote directory and file information
+// from a stream of bytes, using an Archive
+// abstraction.
// Created: 2005/04/11
//
// --------------------------------------------------------------------------
-bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_time_t & theLastSyncTime, box_time_t & theNextSyncTime)
+bool BackupDaemon::DeserializeStoreObjectInfo(box_time_t & theLastSyncTime,
+ box_time_t & theNextSyncTime)
{
//
//
@@ -2944,7 +3003,7 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
//
// this is it, go at it
//
- anArchive.Read(aClientStoreMarker);
+ anArchive.Read(mClientStoreMarker);
anArchive.Read(theLastSyncTime);
anArchive.Read(theNextSyncTime);
@@ -3022,7 +3081,7 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
DeleteAllLocations();
- aClientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;
+ mClientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;
theLastSyncTime = 0;
theNextSyncTime = 0;
diff --git a/bin/bbackupd/BackupDaemon.h b/bin/bbackupd/BackupDaemon.h
index 1d5efd2c..81ebf982 100644
--- a/bin/bbackupd/BackupDaemon.h
+++ b/bin/bbackupd/BackupDaemon.h
@@ -14,13 +14,15 @@
#include <string>
#include <memory>
+#include "BackupClientDirectoryRecord.h"
#include "BoxTime.h"
#include "Daemon.h"
-#include "BackupClientDirectoryRecord.h"
+#include "Logging.h"
#include "Socket.h"
#include "SocketListen.h"
#include "SocketStream.h"
-#include "Logging.h"
+#include "TLSContext.h"
+
#include "autogen_BackupProtocolClient.h"
#ifdef WIN32
@@ -52,10 +54,10 @@ public:
private:
// methods below do partial (specialized) serialization of
// client state only
- bool SerializeStoreObjectInfo(int64_t aClientStoreMarker,
- box_time_t theLastSyncTime, box_time_t theNextSyncTime) const;
- bool DeserializeStoreObjectInfo(int64_t & aClientStoreMarker,
- box_time_t & theLastSyncTime, box_time_t & theNextSyncTime);
+ bool SerializeStoreObjectInfo(box_time_t theLastSyncTime,
+ box_time_t theNextSyncTime) const;
+ bool DeserializeStoreObjectInfo(box_time_t & theLastSyncTime,
+ box_time_t & theNextSyncTime);
bool DeleteStoreObjectInfo() const;
BackupDaemon(const BackupDaemon &);
@@ -111,6 +113,11 @@ public:
private:
void Run2();
+public:
+ void InitCrypto();
+ void RunSyncNow();
+
+private:
void DeleteAllLocations();
void SetupLocations(BackupClientContext &rClientContext, const Configuration &rLocationsConf);
@@ -205,8 +212,15 @@ private:
box_time_t mDeleteUnusedRootDirEntriesAfter; // time to delete them
std::vector<std::pair<int64_t,std::string> > mUnusedRootDirEntries;
+ int64_t mClientStoreMarker;
+ bool mStorageLimitExceeded;
+ bool mReadErrorsOnFilesystemObjects;
+ box_time_t mLastSyncTime;
+ TLSContext mTlsContext;
+
public:
bool StopRun() { return this->Daemon::StopRun(); }
+ bool StorageLimitExceeded() { return mStorageLimitExceeded; }
private:
bool mLogAllFileAccess;
@@ -427,6 +441,28 @@ public:
BOX_INFO("Synchronised file: " << rLocalPath);
}
}
+ virtual void NotifyDirectoryDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_NOTICE("Deleted directory: " << rRemotePath <<
+ " (ID " << BOX_FORMAT_OBJECTID(ObjectID) <<
+ ")");
+ }
+ }
+ virtual void NotifyFileDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_NOTICE("Deleted file: " << rRemotePath <<
+ " (ID " << BOX_FORMAT_OBJECTID(ObjectID) <<
+ ")");
+ }
+ }
#ifdef WIN32
public: