Diffstat (limited to 'bin/bbackupd')
-rw-r--r--  bin/bbackupd/BackupClientContext.cpp            27
-rw-r--r--  bin/bbackupd/BackupClientContext.h              19
-rw-r--r--  bin/bbackupd/BackupClientDeleteList.cpp         80
-rw-r--r--  bin/bbackupd/BackupClientDeleteList.h           34
-rw-r--r--  bin/bbackupd/BackupClientDirectoryRecord.cpp   576
-rw-r--r--  bin/bbackupd/BackupClientDirectoryRecord.h     159
-rw-r--r--  bin/bbackupd/BackupClientInodeToIDMap.cpp       11
-rw-r--r--  bin/bbackupd/BackupDaemon.cpp                 1699
-rw-r--r--  bin/bbackupd/BackupDaemon.h                    148
-rw-r--r--  bin/bbackupd/BackupDaemonInterface.h           164
-rw-r--r--  bin/bbackupd/Win32ServiceFunctions.cpp           8
-rwxr-xr-x  bin/bbackupd/bbackupd-config.in                  6
-rw-r--r--  bin/bbackupd/bbackupd.cpp                        2
-rw-r--r--  bin/bbackupd/win32/NotifySysAdmin.vbs           62
-rw-r--r--  bin/bbackupd/win32/bbackupd.conf                 2
15 files changed, 1655 insertions, 1342 deletions
diff --git a/bin/bbackupd/BackupClientContext.cpp b/bin/bbackupd/BackupClientContext.cpp
index 4b4efd90..b978f54c 100644
--- a/bin/bbackupd/BackupClientContext.cpp
+++ b/bin/bbackupd/BackupClientContext.cpp
@@ -41,17 +41,20 @@
// --------------------------------------------------------------------------
BackupClientContext::BackupClientContext
(
- BackupDaemon &rDaemon,
+ LocationResolver &rResolver,
TLSContext &rTLSContext,
const std::string &rHostname,
+ int Port,
int32_t AccountNumber,
bool ExtendedLogging,
bool ExtendedLogToFile,
- std::string ExtendedLogFile
+ std::string ExtendedLogFile,
+ ProgressNotifier& rProgressNotifier
)
- : mrDaemon(rDaemon),
+ : mrResolver(rResolver),
mrTLSContext(rTLSContext),
mHostname(rHostname),
+ mPort(Port),
mAccountNumber(AccountNumber),
mpSocket(0),
mpConnection(0),
@@ -66,8 +69,9 @@ BackupClientContext::BackupClientContext
mStorageLimitExceeded(false),
mpExcludeFiles(0),
mpExcludeDirs(0),
- mKeepAliveTimer(0),
- mbIsManaged(false)
+ mKeepAliveTimer(0, "KeepAliveTime"),
+ mbIsManaged(false),
+ mrProgressNotifier(rProgressNotifier)
{
}
@@ -129,7 +133,8 @@ BackupProtocolClient &BackupClientContext::GetConnection()
mHostname << "'...");
// Connect!
- mpSocket->Open(mrTLSContext, Socket::TypeINET, mHostname.c_str(), BOX_PORT_BBSTORED);
+ mpSocket->Open(mrTLSContext, Socket::TypeINET,
+ mHostname.c_str(), mPort);
// And create a procotol object
mpConnection = new BackupProtocolClient(*mpSocket);
@@ -146,8 +151,8 @@ BackupProtocolClient &BackupClientContext::GetConnection()
if (!mpExtendedLogFileHandle)
{
- BOX_ERROR("Failed to open extended log "
- "file: " << strerror(errno));
+ BOX_LOG_SYS_ERROR("Failed to open extended "
+ "log file: " << mExtendedLogFile);
}
else
{
@@ -465,7 +470,7 @@ bool BackupClientContext::FindFilename(int64_t ObjectID, int64_t ContainingDirec
{
// Location name -- look up in daemon's records
std::string locPath;
- if(!mrDaemon.FindLocationPathName(elementName.GetClearFilename(), locPath))
+ if(!mrResolver.FindLocationPathName(elementName.GetClearFilename(), locPath))
{
// Didn't find the location... so can't give the local filename
return false;
@@ -504,7 +509,7 @@ void BackupClientContext::SetKeepAliveTime(int iSeconds)
{
mKeepAliveTime = iSeconds < 0 ? 0 : iSeconds;
BOX_TRACE("Set keep-alive time to " << mKeepAliveTime << " seconds");
- mKeepAliveTimer = Timer(mKeepAliveTime);
+ mKeepAliveTimer = Timer(mKeepAliveTime, "KeepAliveTime");
}
// --------------------------------------------------------------------------
@@ -564,7 +569,7 @@ void BackupClientContext::DoKeepAlive()
BOX_TRACE("KeepAliveTime reached, sending keep-alive message");
mpConnection->QueryGetIsAlive();
- mKeepAliveTimer = Timer(mKeepAliveTime);
+ mKeepAliveTimer = Timer(mKeepAliveTime, "KeepAliveTime");
}
int BackupClientContext::GetMaximumDiffingTime()
diff --git a/bin/bbackupd/BackupClientContext.h b/bin/bbackupd/BackupClientContext.h
index 152d8556..4665df2b 100644
--- a/bin/bbackupd/BackupClientContext.h
+++ b/bin/bbackupd/BackupClientContext.h
@@ -12,6 +12,8 @@
#include "BoxTime.h"
#include "BackupClientDeleteList.h"
+#include "BackupClientDirectoryRecord.h"
+#include "BackupDaemonInterface.h"
#include "BackupStoreFile.h"
#include "ExcludeList.h"
#include "Timer.h"
@@ -25,6 +27,7 @@ class BackupStoreFilenameClear;
#include <string>
+
// --------------------------------------------------------------------------
//
// Class
@@ -38,13 +41,15 @@ class BackupClientContext : public DiffTimer
public:
BackupClientContext
(
- BackupDaemon &rDaemon,
+ LocationResolver &rResolver,
TLSContext &rTLSContext,
const std::string &rHostname,
+ int32_t Port,
int32_t AccountNumber,
bool ExtendedLogging,
bool ExtendedLogToFile,
- std::string ExtendedLogFile
+ std::string ExtendedLogFile,
+ ProgressNotifier &rProgressNotifier
);
virtual ~BackupClientContext();
private:
@@ -69,6 +74,7 @@ public:
int64_t GetClientStoreMarker() const {return mClientStoreMarker;}
bool StorageLimitExceeded() {return mStorageLimitExceeded;}
+ void SetStorageLimitExceeded() {mStorageLimitExceeded = true;}
// --------------------------------------------------------------------------
//
@@ -197,10 +203,16 @@ public:
virtual int GetMaximumDiffingTime();
virtual bool IsManaged() { return mbIsManaged; }
+ ProgressNotifier& GetProgressNotifier() const
+ {
+ return mrProgressNotifier;
+ }
+
private:
- BackupDaemon &mrDaemon;
+ LocationResolver &mrResolver;
TLSContext &mrTLSContext;
std::string mHostname;
+ int mPort;
int32_t mAccountNumber;
SocketStreamTLS *mpSocket;
BackupProtocolClient *mpConnection;
@@ -219,6 +231,7 @@ private:
bool mbIsManaged;
int mKeepAliveTime;
int mMaximumDiffingTime;
+ ProgressNotifier &mrProgressNotifier;
};
#endif // BACKUPCLIENTCONTEXT__H
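
The two hunks above change the BackupClientContext interface: the hard dependency on BackupDaemon is replaced by a LocationResolver, the store port is passed explicitly instead of the hard-coded BOX_PORT_BBSTORED, and a ProgressNotifier reference is stored for progress callbacks. A minimal caller sketch, not part of the patch, using only what the new header declares; the hostname, port and account number are placeholders, and rResolver/rNotifier stand in for whatever object (normally the daemon) implements those interfaces:

	#include "BackupClientContext.h"

	void OpenStoreConnection(LocationResolver &rResolver,
		TLSContext &rTLSContext, ProgressNotifier &rNotifier)
	{
		BackupClientContext context(
			rResolver,            // was: BackupDaemon &rDaemon
			rTLSContext,
			"backup.example.com", // store hostname (placeholder)
			22011,                // port, now passed explicitly (placeholder)
			0x2001,               // account number (placeholder)
			false,                // ExtendedLogging
			false,                // ExtendedLogToFile
			"",                   // ExtendedLogFile
			rNotifier);           // new ProgressNotifier parameter

		// The keep-alive timer is now constructed with a name
		// ("KeepAliveTime") so timer logging can identify it.
		context.SetKeepAliveTime(300);
	}
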
diff --git a/bin/bbackupd/BackupClientDeleteList.cpp b/bin/bbackupd/BackupClientDeleteList.cpp
index f6d8e0dc..b9b5b53e 100644
--- a/bin/bbackupd/BackupClientDeleteList.cpp
+++ b/bin/bbackupd/BackupClientDeleteList.cpp
@@ -42,21 +42,38 @@ BackupClientDeleteList::~BackupClientDeleteList()
{
}
+BackupClientDeleteList::FileToDelete::FileToDelete(int64_t DirectoryID,
+ const BackupStoreFilename& rFilename,
+ const std::string& rLocalPath)
+: mDirectoryID(DirectoryID),
+ mFilename(rFilename),
+ mLocalPath(rLocalPath)
+{ }
+
+BackupClientDeleteList::DirToDelete::DirToDelete(int64_t ObjectID,
+ const std::string& rLocalPath)
+: mObjectID(ObjectID),
+ mLocalPath(rLocalPath)
+{ }
+
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDeleteList::AddDirectoryDelete(int64_t)
+// Name: BackupClientDeleteList::AddDirectoryDelete(int64_t,
+// const BackupStoreFilename&)
// Purpose: Add a directory to the list of directories to be deleted.
// Created: 10/11/03
//
// --------------------------------------------------------------------------
-void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID)
+void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID,
+ const std::string& rLocalPath)
{
// Only add the delete to the list if it's not in the "no delete" set
- if(mDirectoryNoDeleteList.find(ObjectID) == mDirectoryNoDeleteList.end())
+ if(mDirectoryNoDeleteList.find(ObjectID) ==
+ mDirectoryNoDeleteList.end())
{
// Not in the list, so should delete it
- mDirectoryList.push_back(ObjectID);
+ mDirectoryList.push_back(DirToDelete(ObjectID, rLocalPath));
}
}
@@ -64,18 +81,22 @@ void BackupClientDeleteList::AddDirectoryDelete(int64_t ObjectID)
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDeleteList::AddFileDelete(int64_t, BackupStoreFilenameClear &)
+// Name: BackupClientDeleteList::AddFileDelete(int64_t,
+// const BackupStoreFilename &)
// Purpose:
// Created: 10/11/03
//
// --------------------------------------------------------------------------
-void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename, const std::string& rLocalPath)
{
// Try to find it in the no delete list
- std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator delEntry(mFileNoDeleteList.begin());
+ std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator
+ delEntry(mFileNoDeleteList.begin());
while(delEntry != mFileNoDeleteList.end())
{
- if((delEntry)->first == DirectoryID && (delEntry)->second == rFilename)
+ if((delEntry)->first == DirectoryID
+ && (delEntry)->second == rFilename)
{
// Found!
break;
@@ -86,7 +107,8 @@ void BackupClientDeleteList::AddFileDelete(int64_t DirectoryID, const BackupStor
// Only add it to the delete list if it wasn't in the no delete list
if(delEntry == mFileNoDeleteList.end())
{
- mFileList.push_back(std::pair<int64_t, BackupStoreFilename>(DirectoryID, rFilename));
+ mFileList.push_back(FileToDelete(DirectoryID, rFilename,
+ rLocalPath));
}
}
@@ -113,18 +135,24 @@ void BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
BackupProtocolClient &connection(rContext.GetConnection());
// Do the deletes
- for(std::vector<int64_t>::iterator i(mDirectoryList.begin()); i != mDirectoryList.end(); ++i)
+ for(std::vector<DirToDelete>::iterator i(mDirectoryList.begin());
+ i != mDirectoryList.end(); ++i)
{
- connection.QueryDeleteDirectory(*i);
+ connection.QueryDeleteDirectory(i->mObjectID);
+ rContext.GetProgressNotifier().NotifyDirectoryDeleted(
+ i->mObjectID, i->mLocalPath);
}
// Clear the directory list
mDirectoryList.clear();
// Delete the files
- for(std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator i(mFileList.begin()); i != mFileList.end(); ++i)
+ for(std::vector<FileToDelete>::iterator i(mFileList.begin());
+ i != mFileList.end(); ++i)
{
- connection.QueryDeleteFile(i->first, i->second);
+ connection.QueryDeleteFile(i->mDirectoryID, i->mFilename);
+ rContext.GetProgressNotifier().NotifyFileDeleted(
+ i->mDirectoryID, i->mLocalPath);
}
}
@@ -140,7 +168,15 @@ void BackupClientDeleteList::PerformDeletions(BackupClientContext &rContext)
void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
{
// First of all, is it in the delete vector?
- std::vector<int64_t>::iterator delEntry(std::find(mDirectoryList.begin(), mDirectoryList.end(), ObjectID));
+ std::vector<DirToDelete>::iterator delEntry(mDirectoryList.begin());
+ for(; delEntry != mDirectoryList.end(); delEntry++)
+ {
+ if(delEntry->mObjectID == ObjectID)
+ {
+ // Found!
+ break;
+ }
+ }
if(delEntry != mDirectoryList.end())
{
// erase this entry
@@ -148,7 +184,8 @@ void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
}
else
{
- // Haven't been asked to delete it yet, put it in the no delete list
+ // Haven't been asked to delete it yet, put it in the
+ // no delete list
mDirectoryNoDeleteList.insert(ObjectID);
}
}
@@ -162,13 +199,15 @@ void BackupClientDeleteList::StopDirectoryDeletion(int64_t ObjectID)
// Created: 19/11/03
//
// --------------------------------------------------------------------------
-void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename)
+void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename)
{
// Find this in the delete list
- std::vector<std::pair<int64_t, BackupStoreFilename> >::iterator delEntry(mFileList.begin());
+ std::vector<FileToDelete>::iterator delEntry(mFileList.begin());
while(delEntry != mFileList.end())
{
- if((delEntry)->first == DirectoryID && (delEntry)->second == rFilename)
+ if(delEntry->mDirectoryID == DirectoryID
+ && delEntry->mFilename == rFilename)
{
// Found!
break;
@@ -186,10 +225,5 @@ void BackupClientDeleteList::StopFileDeletion(int64_t DirectoryID, const BackupS
// Haven't been asked to delete it yet, put it in the no delete list
mFileNoDeleteList.push_back(std::pair<int64_t, BackupStoreFilename>(DirectoryID, rFilename));
}
-
}
-
-
-
-
diff --git a/bin/bbackupd/BackupClientDeleteList.h b/bin/bbackupd/BackupClientDeleteList.h
index 5940cf50..b0fbf51a 100644
--- a/bin/bbackupd/BackupClientDeleteList.h
+++ b/bin/bbackupd/BackupClientDeleteList.h
@@ -28,22 +28,46 @@ class BackupClientContext;
// --------------------------------------------------------------------------
class BackupClientDeleteList
{
+private:
+ class FileToDelete
+ {
+ public:
+ int64_t mDirectoryID;
+ BackupStoreFilename mFilename;
+ std::string mLocalPath;
+ FileToDelete(int64_t DirectoryID,
+ const BackupStoreFilename& rFilename,
+ const std::string& rLocalPath);
+ };
+
+ class DirToDelete
+ {
+ public:
+ int64_t mObjectID;
+ std::string mLocalPath;
+ DirToDelete(int64_t ObjectID, const std::string& rLocalPath);
+ };
+
public:
BackupClientDeleteList();
~BackupClientDeleteList();
- void AddDirectoryDelete(int64_t ObjectID);
- void AddFileDelete(int64_t DirectoryID, const BackupStoreFilename &rFilename);
+ void AddDirectoryDelete(int64_t ObjectID,
+ const std::string& rLocalPath);
+ void AddFileDelete(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename,
+ const std::string& rLocalPath);
void StopDirectoryDeletion(int64_t ObjectID);
- void StopFileDeletion(int64_t DirectoryID, const BackupStoreFilename &rFilename);
+ void StopFileDeletion(int64_t DirectoryID,
+ const BackupStoreFilename &rFilename);
void PerformDeletions(BackupClientContext &rContext);
private:
- std::vector<int64_t> mDirectoryList;
+ std::vector<DirToDelete> mDirectoryList;
std::set<int64_t> mDirectoryNoDeleteList; // note: things only get in this list if they're not present in mDirectoryList when they are 'added'
- std::vector<std::pair<int64_t, BackupStoreFilename> > mFileList;
+ std::vector<FileToDelete> mFileList;
std::vector<std::pair<int64_t, BackupStoreFilename> > mFileNoDeleteList;
};
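
The reworked delete list stores a local path alongside each pending deletion so that PerformDeletions() can report what was removed through the context's ProgressNotifier. A short usage sketch, not part of the patch; the IDs and paths below are invented for illustration:

	#include "BackupClientDeleteList.h"

	void QueuePendingDeletions(BackupClientDeleteList &rDeleteList,
		const BackupStoreFilename &rStoreFilename)
	{
		// Queue a file deletion: containing directory ID, store filename,
		// and the local path used only for NotifyFileDeleted().
		rDeleteList.AddFileDelete(0x1234, rStoreFilename,
			"/home/example/data/report.txt");

		// Queue a directory deletion: object ID plus the local path used
		// for NotifyDirectoryDeleted().
		rDeleteList.AddDirectoryDelete(0x5678,
			"/home/example/data/old-dir");

		// If either object turns out to have been renamed rather than
		// deleted, the pending deletion can still be cancelled.
		rDeleteList.StopDirectoryDeletion(0x5678);
		rDeleteList.StopFileDeletion(0x1234, rStoreFilename);
	}

	// The queued deletions are carried out at the end of the sync run:
	//   rDeleteList.PerformDeletions(rContext);
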
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.cpp b/bin/bbackupd/BackupClientDirectoryRecord.cpp
index 0a0703c2..b8d42d47 100644
--- a/bin/bbackupd/BackupClientDirectoryRecord.cpp
+++ b/bin/bbackupd/BackupClientDirectoryRecord.cpp
@@ -2,7 +2,8 @@
//
// File
// Name: BackupClientDirectoryRecord.cpp
-// Purpose: Implementation of record about directory for backup client
+// Purpose: Implementation of record about directory for
+// backup client
// Created: 2003/10/08
//
// --------------------------------------------------------------------------
@@ -100,16 +101,27 @@ void BackupClientDirectoryRecord::DeleteSubDirectories()
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &, int64_t, const std::string &, bool)
-// Purpose: Syncronise, recusively, a local directory with the server.
+// Name: BackupClientDirectoryRecord::SyncDirectory(i
+// BackupClientDirectoryRecord::SyncParams &,
+// int64_t, const std::string &,
+// const std::string &, bool)
+// Purpose: Recursively synchronise a local directory
+// with the server.
// Created: 2003/10/08
//
// --------------------------------------------------------------------------
-void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::SyncParams &rParams, int64_t ContainingDirectoryID,
- const std::string &rLocalPath, bool ThisDirHasJustBeenCreated)
+void BackupClientDirectoryRecord::SyncDirectory(
+ BackupClientDirectoryRecord::SyncParams &rParams,
+ int64_t ContainingDirectoryID,
+ const std::string &rLocalPath,
+ const std::string &rRemotePath,
+ bool ThisDirHasJustBeenCreated)
{
+ BackupClientContext& rContext(rParams.mrContext);
+ ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
+
// Signal received by daemon?
- if(rParams.mrDaemon.StopRun())
+ if(rParams.mrRunStatusProvider.StopRun())
{
// Yes. Stop now.
THROW_EXCEPTION(BackupStoreException, SignalReceived)
@@ -118,49 +130,66 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Start by making some flag changes, marking this sync as not done,
// and on the immediate sub directories.
mSyncDone = false;
- for(std::map<std::string, BackupClientDirectoryRecord *>::iterator i = mSubDirectories.begin();
+ for(std::map<std::string, BackupClientDirectoryRecord *>::iterator
+ i = mSubDirectories.begin();
i != mSubDirectories.end(); ++i)
{
i->second->mSyncDone = false;
}
- // Work out the time in the future after which the file should be uploaded regardless.
- // This is a simple way to avoid having too many problems with file servers when they have
- // clients with badly out of sync clocks.
- rParams.mUploadAfterThisTimeInTheFuture = GetCurrentBoxTime() + rParams.mMaxFileTimeInFuture;
+ // Work out the time in the future after which the file should
+ // be uploaded regardless. This is a simple way to avoid having
+ // too many problems with file servers when they have clients
+ // with badly out of sync clocks.
+ rParams.mUploadAfterThisTimeInTheFuture = GetCurrentBoxTime() +
+ rParams.mMaxFileTimeInFuture;
- // Build the current state checksum to compare against while getting info from dirs
- // Note checksum is used locally only, so byte order isn't considered.
+ // Build the current state checksum to compare against while
+ // getting info from dirs. Note checksum is used locally only,
+ // so byte order isn't considered.
MD5Digest currentStateChecksum;
+ EMU_STRUCT_STAT dest_st;
// Stat the directory, to get attribute info
+ // If it's a symbolic link, we want the link target here
+ // (as we're about to back up the contents of the directory)
{
- struct stat st;
- if(::stat(rLocalPath.c_str(), &st) != 0)
+ if(EMU_STAT(rLocalPath.c_str(), &dest_st) != 0)
{
- // The directory has probably been deleted, so just ignore this error.
- // In a future scan, this deletion will be noticed, deleted from server, and this object deleted.
- rParams.GetProgressNotifier().NotifyDirStatFailed(
- this, rLocalPath, strerror(errno));
+ // The directory has probably been deleted, so
+ // just ignore this error. In a future scan, this
+ // deletion will be noticed, deleted from server,
+ // and this object deleted.
+ rNotifier.NotifyDirStatFailed(this, rLocalPath,
+ strerror(errno));
return;
}
- // Store inode number in map so directories are tracked in case they're renamed
+ // Store inode number in map so directories are tracked
+ // in case they're renamed
{
- BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
- idMap.AddToMap(st.st_ino, mObjectID, ContainingDirectoryID);
+ BackupClientInodeToIDMap &idMap(
+ rParams.mrContext.GetNewIDMap());
+ idMap.AddToMap(dest_st.st_ino, mObjectID,
+ ContainingDirectoryID);
}
// Add attributes to checksum
- currentStateChecksum.Add(&st.st_mode, sizeof(st.st_mode));
- currentStateChecksum.Add(&st.st_uid, sizeof(st.st_uid));
- currentStateChecksum.Add(&st.st_gid, sizeof(st.st_gid));
+ currentStateChecksum.Add(&dest_st.st_mode,
+ sizeof(dest_st.st_mode));
+ currentStateChecksum.Add(&dest_st.st_uid,
+ sizeof(dest_st.st_uid));
+ currentStateChecksum.Add(&dest_st.st_gid,
+ sizeof(dest_st.st_gid));
// Inode to be paranoid about things moving around
- currentStateChecksum.Add(&st.st_ino, sizeof(st.st_ino));
+ currentStateChecksum.Add(&dest_st.st_ino,
+ sizeof(dest_st.st_ino));
#ifdef HAVE_STRUCT_STAT_ST_FLAGS
- currentStateChecksum.Add(&st.st_flags, sizeof(st.st_flags));
+ currentStateChecksum.Add(&dest_st.st_flags,
+ sizeof(dest_st.st_flags));
#endif
StreamableMemBlock xattr;
- BackupClientFileAttributes::FillExtendedAttr(xattr, rLocalPath.c_str());
+ BackupClientFileAttributes::FillExtendedAttr(xattr,
+ rLocalPath.c_str());
currentStateChecksum.Add(xattr.GetBuffer(), xattr.GetSize());
}
@@ -170,13 +199,13 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
std::vector<std::string> files;
bool downloadDirectoryRecordBecauseOfFutureFiles = false;
- struct stat dir_st;
- if(::lstat(rLocalPath.c_str(), &dir_st) != 0)
+ EMU_STRUCT_STAT link_st;
+ if(EMU_LSTAT(rLocalPath.c_str(), &link_st) != 0)
{
// Report the error (logs and
// eventual email to administrator)
- rParams.GetProgressNotifier().NotifyFileStatFailed(this,
- rLocalPath, strerror(errno));
+ rNotifier.NotifyFileStatFailed(this, rLocalPath,
+ strerror(errno));
// FIXME move to NotifyFileStatFailed()
SetErrorWhenReadingFilesystemObject(rParams,
@@ -192,8 +221,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
DIR *dirHandle = 0;
try
{
- rParams.GetProgressNotifier().NotifyScanDirectory(
- this, rLocalPath);
+ rNotifier.NotifyScanDirectory(this, rLocalPath);
dirHandle = ::opendir(rLocalPath.c_str());
if(dirHandle == 0)
@@ -202,17 +230,19 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// eventual email to administrator)
if (errno == EACCES)
{
- rParams.GetProgressNotifier().NotifyDirListFailed(
- this, rLocalPath, "Access denied");
+ rNotifier.NotifyDirListFailed(this,
+ rLocalPath, "Access denied");
}
else
{
- rParams.GetProgressNotifier().NotifyDirListFailed(this,
+ rNotifier.NotifyDirListFailed(this,
rLocalPath, strerror(errno));
}
- // Report the error (logs and eventual email to administrator)
- SetErrorWhenReadingFilesystemObject(rParams, rLocalPath.c_str());
+ // Report the error (logs and eventual email
+ // to administrator)
+ SetErrorWhenReadingFilesystemObject(rParams,
+ rLocalPath.c_str());
// Ignore this directory for now.
return;
}
@@ -228,14 +258,17 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
::memset(&checksum_info, 0, sizeof(checksum_info));
struct dirent *en = 0;
- struct stat st;
+ EMU_STRUCT_STAT file_st;
std::string filename;
while((en = ::readdir(dirHandle)) != 0)
{
rParams.mrContext.DoKeepAlive();
- // Don't need to use LinuxWorkaround_FinishDirentStruct(en, rLocalPath.c_str());
- // on Linux, as a stat is performed to get all this info
+ // Don't need to use
+ // LinuxWorkaround_FinishDirentStruct(en,
+ // rLocalPath.c_str());
+ // on Linux, as a stat is performed to
+ // get all this info
if(en->d_name[0] == '.' &&
(en->d_name[1] == '\0' || (en->d_name[1] == '.' && en->d_name[2] == '\0')))
@@ -259,11 +292,11 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// prefer S_IFREG, S_IFDIR...
int type = en->d_type;
#else
- if(::lstat(filename.c_str(), &st) != 0)
+ if(EMU_LSTAT(filename.c_str(), &file_st) != 0)
{
// Report the error (logs and
// eventual email to administrator)
- rParams.GetProgressNotifier().NotifyFileStatFailed(this,
+ rNotifier.NotifyFileStatFailed(this,
filename, strerror(errno));
// FIXME move to NotifyFileStatFailed()
@@ -274,19 +307,18 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
continue;
}
- if(st.st_dev != dir_st.st_dev)
+ if(file_st.st_dev != dest_st.st_dev)
{
if(!(rParams.mrContext.ExcludeDir(
filename)))
{
- rParams.GetProgressNotifier()
- .NotifyMountPointSkipped(
- this, filename);
+ rNotifier.NotifyMountPointSkipped(
+ this, filename);
}
continue;
}
- int type = st.st_mode & S_IFMT;
+ int type = file_st.st_mode & S_IFMT;
#endif
if(type == S_IFREG || type == S_IFLNK)
@@ -296,8 +328,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Exclude it?
if(rParams.mrContext.ExcludeFile(filename))
{
- rParams.GetProgressNotifier()
- .NotifyFileExcluded(
+ rNotifier.NotifyFileExcluded(
this,
filename);
@@ -315,8 +346,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Exclude it?
if(rParams.mrContext.ExcludeDir(filename))
{
- rParams.GetProgressNotifier()
- .NotifyDirExcluded(
+ rNotifier.NotifyDirExcluded(
this,
filename);
@@ -327,19 +357,22 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Store on list
dirs.push_back(std::string(en->d_name));
}
+ else if (type == S_IFSOCK || type == S_IFIFO)
+ {
+ // removed notification for these types
+ // see Debian bug 479145, no objections
+ }
else
{
if(rParams.mrContext.ExcludeFile(filename))
{
- rParams.GetProgressNotifier()
- .NotifyFileExcluded(
+ rNotifier.NotifyFileExcluded(
this,
filename);
}
else
{
- rParams.GetProgressNotifier()
- .NotifyUnsupportedFileType(
+ rNotifier.NotifyUnsupportedFileType(
this, filename);
SetErrorWhenReadingFilesystemObject(
rParams, filename.c_str());
@@ -354,10 +387,9 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
#ifdef WIN32
// We didn't stat the file before,
// but now we need the information.
- if(::lstat(filename.c_str(), &st) != 0)
+ if(emu_stat(filename.c_str(), &file_st) != 0)
{
- rParams.GetProgressNotifier()
- .NotifyFileStatFailed(this,
+ rNotifier.NotifyFileStatFailed(this,
filename,
strerror(errno));
@@ -370,18 +402,17 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
continue;
}
- if(st.st_dev != dir_st.st_dev)
+ if(file_st.st_dev != link_st.st_dev)
{
- rParams.GetProgressNotifier()
- .NotifyMountPointSkipped(this,
+ rNotifier.NotifyMountPointSkipped(this,
filename);
continue;
}
#endif
- checksum_info.mModificationTime = FileModificationTime(st);
- checksum_info.mAttributeModificationTime = FileAttrModificationTime(st);
- checksum_info.mSize = st.st_size;
+ checksum_info.mModificationTime = FileModificationTime(file_st);
+ checksum_info.mAttributeModificationTime = FileAttrModificationTime(file_st);
+ checksum_info.mSize = file_st.st_size;
currentStateChecksum.Add(&checksum_info, sizeof(checksum_info));
currentStateChecksum.Add(en->d_name, strlen(en->d_name));
@@ -394,7 +425,7 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Log that this has happened
if(!rParams.mHaveLoggedWarningAboutFutureFileTimes)
{
- rParams.GetProgressNotifier().NotifyFileModifiedInFuture(
+ rNotifier.NotifyFileModifiedInFuture(
this, filename);
rParams.mHaveLoggedWarningAboutFutureFileTimes = true;
}
@@ -468,7 +499,8 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
}
// Do the directory reading
- bool updateCompleteSuccess = UpdateItems(rParams, rLocalPath, pdirOnStore, entriesLeftOver, files, dirs);
+ bool updateCompleteSuccess = UpdateItems(rParams, rLocalPath,
+ rRemotePath, pdirOnStore, entriesLeftOver, files, dirs);
// LAST THING! (think exception safety)
// Store the new checksum -- don't fetch things unnecessarily in the future
@@ -604,11 +636,18 @@ void BackupClientDirectoryRecord::UpdateAttributes(BackupClientDirectoryRecord::
// Created: 2003/10/09
//
// --------------------------------------------------------------------------
-bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncParams &rParams,
- const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+bool BackupClientDirectoryRecord::UpdateItems(
+ BackupClientDirectoryRecord::SyncParams &rParams,
+ const std::string &rLocalPath,
+ const std::string &rRemotePath,
+ BackupStoreDirectory *pDirOnStore,
std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
- std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs)
+ std::vector<std::string> &rFiles,
+ const std::vector<std::string> &rDirs)
{
+ BackupClientContext& rContext(rParams.mrContext);
+ ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
+
bool allUpdatedSuccessfully = true;
// Decrypt all the directory entries.
@@ -634,7 +673,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
f != rFiles.end(); ++f)
{
// Send keep-alive message if needed
- rParams.mrContext.DoKeepAlive();
+ rContext.DoKeepAlive();
// Filename of this file
std::string filename(MakeFullPath(rLocalPath, *f));
@@ -648,10 +687,10 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// BLOCK
{
// Stat the file
- struct stat st;
- if(::lstat(filename.c_str(), &st) != 0)
+ EMU_STRUCT_STAT st;
+ if(EMU_LSTAT(filename.c_str(), &st) != 0)
{
- rParams.GetProgressNotifier().NotifyFileStatFailed(this,
+ rNotifier.NotifyFileStatFailed(this,
filename, strerror(errno));
// Report the error (logs and
@@ -689,7 +728,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) == 0))
{
// Directory exists in the place of this file -- sort it out
- RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore, en->GetObjectID(), *f);
+ RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore,
+ en, *f);
en = 0;
}
@@ -701,7 +741,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// 2) It's not in the store
// Do we know about the inode number?
- const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
+ const BackupClientInodeToIDMap &idMap(rContext.GetCurrentIDMap());
int64_t renameObjectID = 0, renameInDirectory = 0;
if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
{
@@ -711,24 +751,24 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
bool isCurrentVersion = false;
box_time_t srvModTime = 0, srvAttributesHash = 0;
BackupStoreFilenameClear oldLeafname;
- if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion, &srvModTime, &srvAttributesHash, &oldLeafname))
+ if(rContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion, &srvModTime, &srvAttributesHash, &oldLeafname))
{
// Only interested if it's a file and the latest version
if(!isDir && isCurrentVersion)
{
// Check that the object we found in the ID map doesn't exist on disc
- struct stat st;
- if(::stat(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
+ EMU_STRUCT_STAT st;
+ if(EMU_STAT(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
{
// Doesn't exist locally, but does exist on the server.
// Therefore we can safely rename it to this new file.
// Get the connection to the server
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Only do this step if there is room on the server.
// This step will be repeated later when there is space available
- if(!rParams.mrContext.StorageLimitExceeded())
+ if(!rContext.StorageLimitExceeded())
{
// Rename the existing files (ie include old versions) on the server
connection.QueryMoveObject(renameObjectID, renameInDirectory, mObjectID /* move to this directory */,
@@ -736,7 +776,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
storeFilename);
// Stop the attempt to delete the file in the original location
- BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ BackupClientDeleteList &rdelList(rContext.GetDeleteList());
rdelList.StopFileDeletion(renameInDirectory, oldLeafname);
// Create new entry in the directory for it
@@ -799,13 +839,15 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
if (pDirOnStore != 0 && en == 0)
{
doUpload = true;
- BOX_TRACE(filename << ": will upload "
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will upload "
"(not on server)");
}
else if (modTime >= rParams.mSyncPeriodStart)
{
doUpload = true;
- BOX_TRACE(filename << ": will upload "
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will upload "
"(modified since last sync)");
}
}
@@ -823,7 +865,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
> rParams.mMaxUploadWait)
{
doUpload = true;
- BOX_TRACE(filename << ": will upload "
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will upload "
"(continually modified)");
}
@@ -840,7 +883,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
en->GetModificationTime() != modTime)
{
doUpload = true;
- BOX_TRACE(filename << ": will upload "
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will upload "
"(mod time changed)");
}
@@ -852,64 +896,121 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
rParams.mUploadAfterThisTimeInTheFuture)
{
doUpload = true;
- BOX_TRACE(filename << ": will upload "
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will upload "
"(mod time in the future)");
}
}
-
- if (!doUpload)
+
+ if (en != 0 && en->GetModificationTime() == modTime)
{
- BOX_TRACE(filename << ": will not upload "
- "(no reason to upload, mod time is "
- << modTime << " versus sync period "
- << rParams.mSyncPeriodStart << " to "
- << rParams.mSyncPeriodEnd << ")");
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will not upload "
+ "(not modified since last upload)");
+ }
+ else if (!doUpload)
+ {
+ if (modTime > rParams.mSyncPeriodEnd)
+ {
+ box_time_t now = GetCurrentBoxTime();
+ int age = BoxTimeToSeconds(now -
+ modTime);
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will not upload "
+ "(modified too recently: "
+ "only " << age << "seconds ago)");
+ }
+ else
+ {
+ BOX_TRACE("Upload decision: " <<
+ filename << ": will not upload "
+ "(mod time is " << modTime <<
+ " which is outside sync window, "
+ << rParams.mSyncPeriodStart << " to "
+ << rParams.mSyncPeriodEnd << ")");
+ }
}
+ bool fileSynced = true;
+
if (doUpload)
{
+ // Upload needed, don't mark sync success until
+ // we've actually done it
+ fileSynced = false;
+
// Make sure we're connected -- must connect here so we know whether
// the storage limit has been exceeded, and hence whether or not
// to actually upload the file.
- rParams.mrContext.GetConnection();
+ rContext.GetConnection();
// Only do this step if there is room on the server.
// This step will be repeated later when there is space available
- if(!rParams.mrContext.StorageLimitExceeded())
+ if(!rContext.StorageLimitExceeded())
{
- // Upload the file to the server, recording the object ID it returns
- bool noPreviousVersionOnServer = ((pDirOnStore != 0) && (en == 0));
+ // Upload the file to the server, recording the
+ // object ID it returns
+ bool noPreviousVersionOnServer =
+ ((pDirOnStore != 0) && (en == 0));
- // Surround this in a try/catch block, to catch errrors, but still continue
+ // Surround this in a try/catch block, to
+ // catch errors, but still continue
bool uploadSuccess = false;
try
{
- latestObjectID = UploadFile(rParams, filename, storeFilename, fileSize, modTime, attributesHash, noPreviousVersionOnServer);
- uploadSuccess = true;
+ latestObjectID = UploadFile(rParams,
+ filename, storeFilename,
+ fileSize, modTime,
+ attributesHash,
+ noPreviousVersionOnServer);
+
+ if (latestObjectID == 0)
+ {
+ // storage limit exceeded
+ rParams.mrContext.SetStorageLimitExceeded();
+ uploadSuccess = false;
+ allUpdatedSuccessfully = false;
+ }
+ else
+ {
+ uploadSuccess = true;
+ }
}
catch(ConnectionException &e)
{
- // Connection errors should just be passed on to the main handler, retries
- // would probably just cause more problems.
- rParams.GetProgressNotifier()
- .NotifyFileUploadException(
- this, filename, e);
+ // Connection errors should just be
+ // passed on to the main handler,
+ // retries would probably just cause
+ // more problems.
+ rNotifier.NotifyFileUploadException(
+ this, filename, e);
throw;
}
catch(BoxException &e)
{
- // an error occured -- make return code false, to show error in directory
+ if (e.GetType() == BackupStoreException::ExceptionType &&
+ e.GetSubType() == BackupStoreException::SignalReceived)
+ {
+ // abort requested, pass the
+ // exception on up.
+ throw;
+ }
+
+ // an error occured -- make return
+ // code false, to show error in directory
allUpdatedSuccessfully = false;
// Log it.
SetErrorWhenReadingFilesystemObject(rParams, filename.c_str());
- rParams.GetProgressNotifier()
- .NotifyFileUploadException(
- this, filename, e);
+ rNotifier.NotifyFileUploadException(
+ this, filename, e);
}
- // Update structures if the file was uploaded successfully.
+ // Update structures if the file was uploaded
+ // successfully.
if(uploadSuccess)
{
+ fileSynced = true;
+
// delete from pending entries
if(pendingFirstSeenTime != 0 && mpPendingEntries != 0)
{
@@ -919,28 +1020,41 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
else
{
- rParams.GetProgressNotifier().NotifyFileSkippedServerFull(this,
+ rNotifier.NotifyFileSkippedServerFull(this,
filename);
}
}
else if(en != 0 && en->GetAttributesHash() != attributesHash)
{
// Attributes have probably changed, upload them again.
- // If the attributes have changed enough, the directory hash will have changed too,
- // and so the dir will have been downloaded, and the entry will be available.
+ // If the attributes have changed enough, the directory
+ // hash will have changed too, and so the dir will have
+ // been downloaded, and the entry will be available.
// Get connection
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Only do this step if there is room on the server.
- // This step will be repeated later when there is space available
- if(!rParams.mrContext.StorageLimitExceeded())
+ // This step will be repeated later when there is
+ // space available
+ if(!rContext.StorageLimitExceeded())
{
- // Update store
- BackupClientFileAttributes attr;
- attr.ReadAttributes(filename.c_str(), false /* put mod times in the attributes, please */);
- MemBlockStream attrStream(attr);
- connection.QuerySetReplacementFileAttributes(mObjectID, attributesHash, storeFilename, attrStream);
+ try
+ {
+ // Update store
+ BackupClientFileAttributes attr;
+ attr.ReadAttributes(filename.c_str(), false /* put mod times in the attributes, please */);
+ MemBlockStream attrStream(attr);
+ connection.QuerySetReplacementFileAttributes(mObjectID, attributesHash, storeFilename, attrStream);
+ fileSynced = true;
+ }
+ catch (BoxException &e)
+ {
+ BOX_ERROR("Failed to read or store "
+ "file attributes for '" <<
+ filename << "', will try "
+ "again later");
+ }
}
}
@@ -976,36 +1090,56 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
if(fileSize >= rParams.mFileTrackingSizeThreshold)
{
// Get the map
- BackupClientInodeToIDMap &idMap(rParams.mrContext.GetNewIDMap());
+ BackupClientInodeToIDMap &idMap(rContext.GetNewIDMap());
// Need to get an ID from somewhere...
if(latestObjectID != 0)
{
// Use this one
+ BOX_TRACE("Storing uploaded file ID " <<
+ inodeNum << " (" << filename << ") "
+ "in ID map as object " <<
+ latestObjectID << " with parent " <<
+ mObjectID);
idMap.AddToMap(inodeNum, latestObjectID, mObjectID /* containing directory */);
}
else
{
// Don't know it -- haven't sent anything to the store, and didn't get a listing.
// Look it up in the current map, and if it's there, use that.
- const BackupClientInodeToIDMap &currentIDMap(rParams.mrContext.GetCurrentIDMap());
+ const BackupClientInodeToIDMap &currentIDMap(rContext.GetCurrentIDMap());
int64_t objid = 0, dirid = 0;
if(currentIDMap.Lookup(inodeNum, objid, dirid))
{
// Found
+ if (dirid != mObjectID)
+ {
+ BOX_WARNING("Found conflicting parent ID for file ID " << inodeNum << " (" << filename << "): expected " << mObjectID << " but found " << dirid << " (same directory used in two different locations?)");
+ }
+
ASSERT(dirid == mObjectID);
+
// NOTE: If the above assert fails, an inode number has been reused by the OS,
// or there is a problem somewhere. If this happened on a short test run, look
// into it. However, in a long running process this may happen occasionally and
- // not indiciate anything wrong.
+ // not indicate anything wrong.
// Run the release version for real life use, where this check is not made.
- idMap.AddToMap(inodeNum, objid, mObjectID /* containing directory */);
+ BOX_TRACE("Storing found file ID " <<
+ inodeNum << " (" << filename <<
+ ") in ID map as object " <<
+ objid << " with parent " <<
+ mObjectID);
+ idMap.AddToMap(inodeNum, objid,
+ mObjectID /* containing directory */);
}
}
}
- rParams.GetProgressNotifier().NotifyFileSynchronised(this,
- filename, fileSize);
+ if (fileSynced)
+ {
+ rNotifier.NotifyFileSynchronised(this, filename,
+ fileSize);
+ }
}
// Erase contents of files to save space when recursing
@@ -1014,7 +1148,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// Delete the pending entries, if the map is entry
if(mpPendingEntries != 0 && mpPendingEntries->size() == 0)
{
- TRACE1("Deleting mpPendingEntries from dir ID %lld\n", mObjectID);
+ BOX_TRACE("Deleting mpPendingEntries from dir ID " <<
+ BOX_FORMAT_OBJECTID(mObjectID));
delete mpPendingEntries;
mpPendingEntries = 0;
}
@@ -1024,7 +1159,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
d != rDirs.end(); ++d)
{
// Send keep-alive message if needed
- rParams.mrContext.DoKeepAlive();
+ rContext.DoKeepAlive();
// Get the local filename
std::string dirname(MakeFullPath(rLocalPath, *d));
@@ -1044,21 +1179,27 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// Check that the entry which might have been found is in fact a directory
if((en != 0) && ((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == 0))
{
- // Entry exists, but is not a directory. Bad. Get rid of it.
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ // Entry exists, but is not a directory. Bad.
+ // Get rid of it.
+ BackupProtocolClient &connection(rContext.GetConnection());
connection.QueryDeleteFile(mObjectID /* in directory */, storeFilename);
+ rNotifier.NotifyFileDeleted(en->GetObjectID(),
+ storeFilename.GetClearFilename());
// Nothing found
en = 0;
}
- // Flag for having created directory, so can optimise the recusive call not to
- // read it again, because we know it's empty.
+ // Flag for having created directory, so can optimise the
+ // recursive call not to read it again, because we know
+ // it's empty.
bool haveJustCreatedDirOnServer = false;
// Next, see if it's in the list of sub directories
BackupClientDirectoryRecord *psubDirRecord = 0;
- std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(*d));
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator
+ e(mSubDirectories.find(*d));
+
if(e != mSubDirectories.end())
{
// In the list, just use this pointer
@@ -1080,7 +1221,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// No. Exists on the server, and we know about it from the listing.
subDirObjectID = en->GetObjectID();
}
- else if(rParams.mrContext.StorageLimitExceeded())
+ else if(rContext.StorageLimitExceeded())
// know we've got a connection if we get this far,
// as dir will have been modified.
{
@@ -1098,29 +1239,47 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
box_time_t attrModTime = 0;
InodeRefType inodeNum = 0;
BackupClientFileAttributes attr;
- attr.ReadAttributes(dirname.c_str(), true /* directories have zero mod times */,
- 0 /* not interested in mod time */, &attrModTime, 0 /* not file size */,
- &inodeNum);
+ bool failedToReadAttributes = false;
+
+ try
+ {
+ attr.ReadAttributes(dirname.c_str(),
+ true /* directories have zero mod times */,
+ 0 /* not interested in mod time */,
+ &attrModTime, 0 /* not file size */,
+ &inodeNum);
+ }
+ catch (BoxException &e)
+ {
+ BOX_WARNING("Failed to read attributes "
+ "of directory, cannot check "
+ "for rename, assuming new: '"
+ << dirname << "'");
+ failedToReadAttributes = true;
+ }
// Check to see if the directory been renamed
// First, do we have a record in the ID map?
int64_t renameObjectID = 0, renameInDirectory = 0;
bool renameDir = false;
- const BackupClientInodeToIDMap &idMap(rParams.mrContext.GetCurrentIDMap());
- if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
+ const BackupClientInodeToIDMap &idMap(
+ rContext.GetCurrentIDMap());
+
+ if(!failedToReadAttributes && idMap.Lookup(inodeNum,
+ renameObjectID, renameInDirectory))
{
// Look up on the server to get the name, to build the local filename
std::string localPotentialOldName;
bool isDir = false;
bool isCurrentVersion = false;
- if(rParams.mrContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion))
+ if(rContext.FindFilename(renameObjectID, renameInDirectory, localPotentialOldName, isDir, isCurrentVersion))
{
// Only interested if it's a directory
if(isDir && isCurrentVersion)
{
// Check that the object doesn't exist already
- struct stat st;
- if(::stat(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
+ EMU_STRUCT_STAT st;
+ if(EMU_STAT(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
{
// Doesn't exist locally, but does exist on the server.
// Therefore we can safely rename it.
@@ -1131,7 +1290,7 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
// Get connection
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Don't do a check for storage limit exceeded here, because if we get to this
// stage, a connection will have been opened, and the status known, so the check
@@ -1151,7 +1310,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
connection.QueryChangeDirAttributes(renameObjectID, attrModTime, attrStream);
// Stop it being deleted later
- BackupClientDeleteList &rdelList(rParams.mrContext.GetDeleteList());
+ BackupClientDeleteList &rdelList(
+ rContext.GetDeleteList());
rdelList.StopDirectoryDeletion(renameObjectID);
// This is the ID for the renamed directory
@@ -1188,12 +1348,14 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
}
- ASSERT(psubDirRecord != 0 || rParams.mrContext.StorageLimitExceeded());
+ ASSERT(psubDirRecord != 0 || rContext.StorageLimitExceeded());
if(psubDirRecord)
{
// Sync this sub directory too
- psubDirRecord->SyncDirectory(rParams, mObjectID, dirname, haveJustCreatedDirOnServer);
+ psubDirRecord->SyncDirectory(rParams, mObjectID,
+ dirname, rRemotePath + "/" + *d,
+ haveJustCreatedDirOnServer);
}
// Zero pointer in rEntriesLeftOver, if we have a pointer to zero
@@ -1222,20 +1384,27 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// to a list, which is actually deleted at the very end of the session.
// If there's an error during the process, it doesn't matter if things
// aren't actually deleted, as the whole state will be reset anyway.
- BackupClientDeleteList &rdel(rParams.mrContext.GetDeleteList());
+ BackupClientDeleteList &rdel(rContext.GetDeleteList());
+
+ BackupStoreFilenameClear clear(en->GetName());
+ std::string localName = MakeFullPath(rLocalPath,
+ clear.GetClearFilename());
// Delete this entry -- file or directory?
if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
{
// Set a pending deletion for the file
- rdel.AddFileDelete(mObjectID, en->GetName());
+ rdel.AddFileDelete(mObjectID, en->GetName(),
+ localName);
}
else if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) != 0)
{
// Set as a pending deletion for the directory
- rdel.AddDirectoryDelete(en->GetObjectID());
+ rdel.AddDirectoryDelete(en->GetObjectID(),
+ localName);
- // If there's a directory record for it in the sub directory map, delete it now
+ // If there's a directory record for it in
+ // the sub directory map, delete it now
BackupStoreFilenameClear dirname(en->GetName());
std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(dirname.GetClearFilename()));
if(e != mSubDirectories.end())
@@ -1249,8 +1418,8 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
rLocalPath,
dirname.GetClearFilename());
- TRACE1("Deleted directory record for "
- "%s\n", name.c_str());
+ BOX_TRACE("Deleted directory record "
+ "for " << name);
}
}
}
@@ -1270,14 +1439,24 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// Created: 9/7/04
//
// --------------------------------------------------------------------------
-void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename)
+void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(
+ SyncParams &rParams,
+ BackupStoreDirectory* pDirOnStore,
+ BackupStoreDirectory::Entry* pEntry,
+ const std::string &rFilename)
{
// First, delete the directory
BackupProtocolClient &connection(rParams.mrContext.GetConnection());
- connection.QueryDeleteDirectory(ObjectID);
+ connection.QueryDeleteDirectory(pEntry->GetObjectID());
+
+ BackupStoreFilenameClear clear(pEntry->GetName());
+ rParams.mrContext.GetProgressNotifier().NotifyDirectoryDeleted(
+ pEntry->GetObjectID(), clear.GetClearFilename());
// Then, delete any directory record
- std::map<std::string, BackupClientDirectoryRecord *>::iterator e(mSubDirectories.find(rFilename));
+ std::map<std::string, BackupClientDirectoryRecord *>::iterator
+ e(mSubDirectories.find(rFilename));
+
if(e != mSubDirectories.end())
{
// A record exists for this, remove it
@@ -1294,16 +1473,30 @@ void BackupClientDirectoryRecord::RemoveDirectoryInPlaceOfFile(SyncParams &rPara
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &, const std::string &, const BackupStoreFilename &, int64_t, box_time_t, box_time_t, bool)
-// Purpose: Private. Upload a file to the server -- may send a patch instead of the whole thing
+// Name: BackupClientDirectoryRecord::UploadFile(
+// BackupClientDirectoryRecord::SyncParams &,
+// const std::string &,
+// const BackupStoreFilename &,
+// int64_t, box_time_t, box_time_t, bool)
+// Purpose: Private. Upload a file to the server. May send
+// a patch instead of the whole thing
// Created: 20/1/04
//
// --------------------------------------------------------------------------
-int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
- int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer)
+int64_t BackupClientDirectoryRecord::UploadFile(
+ BackupClientDirectoryRecord::SyncParams &rParams,
+ const std::string &rFilename,
+ const BackupStoreFilename &rStoreFilename,
+ int64_t FileSize,
+ box_time_t ModificationTime,
+ box_time_t AttributesHash,
+ bool NoPreviousVersionOnServer)
{
+ BackupClientContext& rContext(rParams.mrContext);
+ ProgressNotifier& rNotifier(rContext.GetProgressNotifier());
+
// Get the connection
- BackupProtocolClient &connection(rParams.mrContext.GetConnection());
+ BackupProtocolClient &connection(rContext.GetConnection());
// Info
int64_t objID = 0;
@@ -1312,8 +1505,10 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
// Use a try block to catch store full errors
try
{
- // Might an old version be on the server, and is the file size over the diffing threshold?
- if(!NoPreviousVersionOnServer && FileSize >= rParams.mDiffingUploadSizeThreshold)
+ // Might an old version be on the server, and is the file
+ // size over the diffing threshold?
+ if(!NoPreviousVersionOnServer &&
+ FileSize >= rParams.mDiffingUploadSizeThreshold)
{
// YES -- try to do diff, if possible
// First, query the server to see if there's an old version available
@@ -1323,7 +1518,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
if(diffFromID != 0)
{
// Found an old version
- rParams.GetProgressNotifier().NotifyFileUploadingPatch(this,
+ rNotifier.NotifyFileUploadingPatch(this,
rFilename);
// Get the index
@@ -1333,7 +1528,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
// Diff the file
//
- rParams.mrContext.ManageDiffProcess();
+ rContext.ManageDiffProcess();
bool isCompletelyDifferent = false;
std::auto_ptr<IOStream> patchStream(
@@ -1342,11 +1537,11 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
mObjectID, /* containing directory */
rStoreFilename, diffFromID, *blockIndexStream,
connection.GetTimeout(),
- &rParams.mrContext, // DiffTimer implementation
+ &rContext, // DiffTimer implementation
0 /* not interested in the modification time */,
&isCompletelyDifferent));
- rParams.mrContext.UnManageDiffProcess();
+ rContext.UnManageDiffProcess();
//
// Upload the patch to the store
@@ -1354,6 +1549,9 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
std::auto_ptr<BackupProtocolClientSuccess> stored(connection.QueryStoreFile(mObjectID, ModificationTime,
AttributesHash, isCompletelyDifferent?(0):(diffFromID), rStoreFilename, *patchStream));
+ // Get object ID from the result
+ objID = stored->GetObjectID();
+
// Don't attempt to upload it again!
doNormalUpload = false;
}
@@ -1362,13 +1560,14 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
if(doNormalUpload)
{
// below threshold or nothing to diff from, so upload whole
- rParams.GetProgressNotifier().NotifyFileUploading(this,
- rFilename);
+ rNotifier.NotifyFileUploading(this, rFilename);
// Prepare to upload, getting a stream which will encode the file as we go along
std::auto_ptr<IOStream> upload(
BackupStoreFile::EncodeFile(rFilename.c_str(),
- mObjectID, rStoreFilename));
+ mObjectID, rStoreFilename, NULL,
+ &rParams,
+ &(rParams.mrRunStatusProvider)));
// Send to store
std::auto_ptr<BackupProtocolClientSuccess> stored(
@@ -1384,9 +1583,10 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
}
catch(BoxException &e)
{
- rParams.mrContext.UnManageDiffProcess();
+ rContext.UnManageDiffProcess();
- if(e.GetType() == ConnectionException::ExceptionType && e.GetSubType() == ConnectionException::Protocol_UnexpectedReply)
+ if(e.GetType() == ConnectionException::ExceptionType &&
+ e.GetSubType() == ConnectionException::Protocol_UnexpectedReply)
{
// Check and see what error the protocol has,
// this is more useful to users than the exception.
@@ -1397,11 +1597,15 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
&& subtype == BackupProtocolClientError::Err_StorageLimitExceeded)
{
// The hard limit was exceeded on the server, notify!
- rParams.mrDaemon.NotifySysadmin(BackupDaemon::NotifyEvent_StoreFull);
+ rParams.mrSysadminNotifier.NotifySysadmin(
+ SysadminNotifier::StoreFull);
+ // return an error code instead of
+ // throwing an exception that we
+ // can't debug.
+ return 0;
}
- rParams.GetProgressNotifier()
- .NotifyFileUploadServerError(
- this, rFilename, type, subtype);
+ rNotifier.NotifyFileUploadServerError(this,
+ rFilename, type, subtype);
}
}
@@ -1409,7 +1613,7 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
throw;
}
- rParams.GetProgressNotifier().NotifyFileUploaded(this, rFilename, FileSize);
+ rNotifier.NotifyFileUploaded(this, rFilename, FileSize);
// Return the new object ID of this file
return objID;
@@ -1450,16 +1654,20 @@ void BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(BackupClie
// Created: 8/3/04
//
// --------------------------------------------------------------------------
-BackupClientDirectoryRecord::SyncParams::SyncParams(BackupDaemon &rDaemon,
- ProgressNotifier &rProgressNotifier, BackupClientContext &rContext)
- : mrProgressNotifier(rProgressNotifier),
- mSyncPeriodStart(0),
+BackupClientDirectoryRecord::SyncParams::SyncParams(
+ RunStatusProvider &rRunStatusProvider,
+ SysadminNotifier &rSysadminNotifier,
+ ProgressNotifier &rProgressNotifier,
+ BackupClientContext &rContext)
+ : mSyncPeriodStart(0),
mSyncPeriodEnd(0),
mMaxUploadWait(0),
mMaxFileTimeInFuture(99999999999999999LL),
mFileTrackingSizeThreshold(16*1024),
mDiffingUploadSizeThreshold(16*1024),
- mrDaemon(rDaemon),
+ mrRunStatusProvider(rRunStatusProvider),
+ mrSysadminNotifier(rSysadminNotifier),
+ mrProgressNotifier(rProgressNotifier),
mrContext(rContext),
mReadErrorsOnFilesystemObjects(false),
mUploadAfterThisTimeInTheFuture(99999999999999999LL),
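
With the SyncParams change above, the directory record no longer needs the whole BackupDaemon: it takes the three narrower interfaces instead, and SyncDirectory() now also receives the remote (store-side) path for progress reporting. A sketch of an updated caller, assuming the daemon object implements RunStatusProvider, SysadminNotifier and ProgressNotifier (the local and remote paths are placeholders):

	#include "BackupClientDirectoryRecord.h"

	void SyncOneLocation(BackupDaemon &rDaemon,
		BackupClientContext &rContext,
		BackupClientDirectoryRecord &rRecord,
		int64_t ContainingDirectoryID)
	{
		// The daemon is passed once per interface it implements.
		BackupClientDirectoryRecord::SyncParams params(
			rDaemon,   // RunStatusProvider (StopRun())
			rDaemon,   // SysadminNotifier (NotifySysadmin())
			rDaemon,   // ProgressNotifier (Notify* callbacks)
			rContext);

		// The remote path is the new argument and is only used when
		// reporting progress.
		rRecord.SyncDirectory(params, ContainingDirectoryID,
			"/home/example/data",   // rLocalPath
			"/data",                // rRemotePath (illustrative)
			false);                 // ThisDirHasJustBeenCreated
	}
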
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.h b/bin/bbackupd/BackupClientDirectoryRecord.h
index 9e4dda7a..fce3fc04 100644
--- a/bin/bbackupd/BackupClientDirectoryRecord.h
+++ b/bin/bbackupd/BackupClientDirectoryRecord.h
@@ -13,10 +13,13 @@
#include <string>
#include <map>
-#include "BoxTime.h"
#include "BackupClientFileAttributes.h"
+#include "BackupDaemonInterface.h"
#include "BackupStoreDirectory.h"
+#include "BoxTime.h"
#include "MD5Digest.h"
+#include "ReadLoggingStream.h"
+#include "RunStatusProvider.h"
class Archive;
class BackupClientContext;
@@ -25,82 +28,6 @@ class BackupDaemon;
// --------------------------------------------------------------------------
//
// Class
-// Name: ProgressNotifier
-// Purpose: Provides methods for the backup library to inform the user
-// interface about its progress with the backup
-// Created: 2005/11/20
-//
-// --------------------------------------------------------------------------
-class BackupClientDirectoryRecord;
-
-class ProgressNotifier
-{
- public:
- virtual ~ProgressNotifier() { }
- virtual void NotifyScanDirectory(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyDirStatFailed(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- const std::string& rErrorMsg) = 0;
- virtual void NotifyFileStatFailed(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- const std::string& rErrorMsg) = 0;
- virtual void NotifyDirListFailed(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- const std::string& rErrorMsg) = 0;
- virtual void NotifyMountPointSkipped(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyFileExcluded(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyDirExcluded(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyUnsupportedFileType(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyFileReadFailed(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- const std::string& rErrorMsg) = 0;
- virtual void NotifyFileModifiedInFuture(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyFileSkippedServerFull(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyFileUploadException(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- const BoxException& rException) = 0;
- virtual void NotifyFileUploadServerError(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- int type, int subtype) = 0;
- virtual void NotifyFileUploading(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyFileUploadingPatch(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath) = 0;
- virtual void NotifyFileUploaded(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- int64_t FileSize) = 0;
- virtual void NotifyFileSynchronised(
- const BackupClientDirectoryRecord* pDirRecord,
- const std::string& rLocalPath,
- int64_t FileSize) = 0;
-};
-
-// --------------------------------------------------------------------------
-//
-// Class
// Name: BackupClientDirectoryRecord
// Purpose: Implementation of record about directory for backup client
// Created: 2003/10/08
@@ -132,11 +59,12 @@ public:
// Created: 8/3/04
//
// --------------------------------------------------------------------------
- class SyncParams
+ class SyncParams : public ReadLoggingStream::Logger
{
public:
SyncParams(
- BackupDaemon &rDaemon,
+ RunStatusProvider &rRunStatusProvider,
+ SysadminNotifier &rSysadminNotifier,
ProgressNotifier &rProgressNotifier,
BackupClientContext &rContext);
~SyncParams();
@@ -144,7 +72,6 @@ public:
// No copying
SyncParams(const SyncParams&);
SyncParams &operator=(const SyncParams&);
- ProgressNotifier &mrProgressNotifier;
public:
// Data members are public, as accessors are not justified here
@@ -154,7 +81,9 @@ public:
box_time_t mMaxFileTimeInFuture;
int32_t mFileTrackingSizeThreshold;
int32_t mDiffingUploadSizeThreshold;
- BackupDaemon &mrDaemon;
+ RunStatusProvider &mrRunStatusProvider;
+ SysadminNotifier &mrSysadminNotifier;
+ ProgressNotifier &mrProgressNotifier;
BackupClientContext &mrContext;
bool mReadErrorsOnFilesystemObjects;
@@ -162,41 +91,79 @@ public:
box_time_t mUploadAfterThisTimeInTheFuture;
bool mHaveLoggedWarningAboutFutureFileTimes;
+ bool StopRun() { return mrRunStatusProvider.StopRun(); }
+ void NotifySysadmin(SysadminNotifier::EventCode Event)
+ {
+ mrSysadminNotifier.NotifySysadmin(Event);
+ }
ProgressNotifier& GetProgressNotifier() const
{
return mrProgressNotifier;
}
+
+ /* ReadLoggingStream::Logger implementation */
+ virtual void Log(int64_t readSize, int64_t offset,
+ int64_t length, box_time_t elapsed, box_time_t finish)
+ {
+ mrProgressNotifier.NotifyReadProgress(readSize, offset,
+ length, elapsed, finish);
+ }
+ virtual void Log(int64_t readSize, int64_t offset,
+ int64_t length)
+ {
+ mrProgressNotifier.NotifyReadProgress(readSize, offset,
+ length);
+ }
+ virtual void Log(int64_t readSize, int64_t offset)
+ {
+ mrProgressNotifier.NotifyReadProgress(readSize, offset);
+ }
};
- void SyncDirectory(SyncParams &rParams, int64_t ContainingDirectoryID, const std::string &rLocalPath,
+ void SyncDirectory(SyncParams &rParams,
+ int64_t ContainingDirectoryID,
+ const std::string &rLocalPath,
+ const std::string &rRemotePath,
bool ThisDirHasJustBeenCreated = false);
private:
void DeleteSubDirectories();
BackupStoreDirectory *FetchDirectoryListing(SyncParams &rParams);
- void UpdateAttributes(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, const std::string &rLocalPath);
- bool UpdateItems(SyncParams &rParams, const std::string &rLocalPath, BackupStoreDirectory *pDirOnStore,
+ void UpdateAttributes(SyncParams &rParams,
+ BackupStoreDirectory *pDirOnStore,
+ const std::string &rLocalPath);
+ bool UpdateItems(SyncParams &rParams, const std::string &rLocalPath,
+ const std::string &rRemotePath,
+ BackupStoreDirectory *pDirOnStore,
std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
- std::vector<std::string> &rFiles, const std::vector<std::string> &rDirs);
- int64_t UploadFile(SyncParams &rParams, const std::string &rFilename, const BackupStoreFilename &rStoreFilename,
- int64_t FileSize, box_time_t ModificationTime, box_time_t AttributesHash, bool NoPreviousVersionOnServer);
- void SetErrorWhenReadingFilesystemObject(SyncParams &rParams, const char *Filename);
- void RemoveDirectoryInPlaceOfFile(SyncParams &rParams, BackupStoreDirectory *pDirOnStore, int64_t ObjectID, const std::string &rFilename);
+ std::vector<std::string> &rFiles,
+ const std::vector<std::string> &rDirs);
+ int64_t UploadFile(SyncParams &rParams,
+ const std::string &rFilename,
+ const BackupStoreFilename &rStoreFilename,
+ int64_t FileSize, box_time_t ModificationTime,
+ box_time_t AttributesHash, bool NoPreviousVersionOnServer);
+ void SetErrorWhenReadingFilesystemObject(SyncParams &rParams,
+ const char *Filename);
+ void RemoveDirectoryInPlaceOfFile(SyncParams &rParams,
+ BackupStoreDirectory* pDirOnStore,
+ BackupStoreDirectory::Entry* pEntry,
+ const std::string &rFilename);
private:
- int64_t mObjectID;
+ int64_t mObjectID;
std::string mSubDirName;
- bool mInitialSyncDone;
- bool mSyncDone;
+ bool mInitialSyncDone;
+ bool mSyncDone;
// Checksum of directory contents and attributes, used to detect changes
uint8_t mStateChecksum[MD5Digest::DigestLength];
- std::map<std::string, box_time_t> *mpPendingEntries;
- std::map<std::string, BackupClientDirectoryRecord *> mSubDirectories;
+ std::map<std::string, box_time_t> *mpPendingEntries;
+ std::map<std::string, BackupClientDirectoryRecord *> mSubDirectories;
// mpPendingEntries is a pointer rather than simple a member
- // variables, because most of the time it'll be empty. This would waste a lot
- // of memory because of STL allocation policies.
+ // variable, because most of the time it'll be empty. This would
+ // waste a lot of memory because of STL allocation policies.
};
#endif // BACKUPCLIENTDIRECTORYRECORD__H
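
The SyncParams class above now doubles as a ReadLoggingStream::Logger, forwarding each Log() callback to the ProgressNotifier's NotifyReadProgress(). A minimal standalone sketch of that forwarding pattern follows; Logger, ProgressNotifier, SyncParamsSketch, PrintNotifier and the box_time_t typedef here are simplified stand-ins, not the real Box Backup declarations.

// Standalone sketch (not the real headers): a SyncParams-like adapter
// that implements a read-logging interface by delegating to a notifier.
#include <cstdint>
#include <iostream>

typedef int64_t box_time_t; // stand-in for Box Backup's box_time_t

// Simplified logger interface, modelled on ReadLoggingStream::Logger
class Logger
{
	public:
		virtual ~Logger() { }
		virtual void Log(int64_t readSize, int64_t offset, int64_t length,
			box_time_t elapsed, box_time_t finish) = 0;
};

// Simplified progress notifier with only the read-progress hook
class ProgressNotifier
{
	public:
		virtual ~ProgressNotifier() { }
		virtual void NotifyReadProgress(int64_t readSize, int64_t offset,
			int64_t length, box_time_t elapsed, box_time_t finish) = 0;
};

// Adapter in the style of SyncParams: every Log() callback is forwarded
// to the ProgressNotifier it holds a reference to.
class SyncParamsSketch : public Logger
{
	public:
		SyncParamsSketch(ProgressNotifier &rNotifier) : mrNotifier(rNotifier) { }
		virtual void Log(int64_t readSize, int64_t offset, int64_t length,
			box_time_t elapsed, box_time_t finish)
		{
			mrNotifier.NotifyReadProgress(readSize, offset, length,
				elapsed, finish);
		}
	private:
		ProgressNotifier &mrNotifier;
};

// Trivial notifier that prints progress to stdout
class PrintNotifier : public ProgressNotifier
{
	public:
		virtual void NotifyReadProgress(int64_t readSize, int64_t offset,
			int64_t length, box_time_t elapsed, box_time_t finish)
		{
			std::cout << "read " << readSize << " bytes at offset "
				<< offset << " of " << length << "\n";
		}
};

int main()
{
	PrintNotifier notifier;
	SyncParamsSketch params(notifier);
	params.Log(4096, 0, 1048576, 0, 0); // simulate one read callback
	return 0;
}
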
diff --git a/bin/bbackupd/BackupClientInodeToIDMap.cpp b/bin/bbackupd/BackupClientInodeToIDMap.cpp
index 0d4fd507..b9f56c5a 100644
--- a/bin/bbackupd/BackupClientInodeToIDMap.cpp
+++ b/bin/bbackupd/BackupClientInodeToIDMap.cpp
@@ -217,13 +217,16 @@ void BackupClientInodeToIDMap::AddToMap(InodeRefType InodeRef, int64_t ObjectID,
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientInodeToIDMap::Lookup(InodeRefType, int64_t &, int64_t &) const
-// Purpose: Looks up an inode in the map, returning true if it exists, and the object
-// ids of it and the directory it's in the reference arguments.
+// Name: BackupClientInodeToIDMap::Lookup(InodeRefType,
+// int64_t &, int64_t &) const
+// Purpose: Looks up an inode in the map, returning true if it
+// exists, and the object ids of it and the directory
+// it's in the reference arguments.
// Created: 11/11/03
//
// --------------------------------------------------------------------------
-bool BackupClientInodeToIDMap::Lookup(InodeRefType InodeRef, int64_t &rObjectIDOut, int64_t &rInDirectoryOut) const
+bool BackupClientInodeToIDMap::Lookup(InodeRefType InodeRef,
+ int64_t &rObjectIDOut, int64_t &rInDirectoryOut) const
{
#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
std::map<InodeRefType, std::pair<int64_t, int64_t> >::const_iterator i(mMap.find(InodeRef));
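
The in-memory variant of the inode-to-ID map used by Lookup() above boils down to a std::map of (object ID, containing directory ID) pairs keyed on inode number. A self-contained sketch under that reading; InodeToIDMapSketch and the InodeRefType typedef are simplified stand-ins for the real class.

// Standalone sketch of the in-memory inode-to-ID map: AddToMap() stores
// (object ID, containing directory ID) against an inode number, and
// Lookup() reports both through reference arguments.
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

typedef uint64_t InodeRefType; // stand-in for the real typedef

class InodeToIDMapSketch
{
	public:
		void AddToMap(InodeRefType InodeRef, int64_t ObjectID,
			int64_t InDirectory)
		{
			mMap[InodeRef] = std::make_pair(ObjectID, InDirectory);
		}

		bool Lookup(InodeRefType InodeRef, int64_t &rObjectIDOut,
			int64_t &rInDirectoryOut) const
		{
			std::map<InodeRefType, std::pair<int64_t, int64_t> >::const_iterator
				i(mMap.find(InodeRef));
			if(i == mMap.end())
			{
				return false; // not in the map
			}
			rObjectIDOut = i->second.first;
			rInDirectoryOut = i->second.second;
			return true;
		}

	private:
		std::map<InodeRefType, std::pair<int64_t, int64_t> > mMap;
};

int main()
{
	InodeToIDMapSketch map;
	map.AddToMap(12345, 42, 7);

	int64_t objectID = 0, inDirectory = 0;
	if(map.Lookup(12345, objectID, inDirectory))
	{
		std::cout << "object " << objectID << " in directory "
			<< inDirectory << "\n";
	}
	return 0;
}
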
diff --git a/bin/bbackupd/BackupDaemon.cpp b/bin/bbackupd/BackupDaemon.cpp
index e762bbdc..3615b848 100644
--- a/bin/bbackupd/BackupDaemon.cpp
+++ b/bin/bbackupd/BackupDaemon.cpp
@@ -10,6 +10,7 @@
#include "Box.h"
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
@@ -47,36 +48,34 @@
#include "BoxPortsAndFiles.h"
#include "SSLLib.h"
-#include "TLSContext.h"
-#include "BackupDaemon.h"
-#include "BackupDaemonConfigVerify.h"
+#include "autogen_BackupProtocolClient.h"
+#include "autogen_ClientException.h"
+#include "autogen_ConversionException.h"
+#include "Archive.h"
#include "BackupClientContext.h"
+#include "BackupClientCryptoKeys.h"
#include "BackupClientDirectoryRecord.h"
-#include "BackupStoreDirectory.h"
#include "BackupClientFileAttributes.h"
-#include "BackupStoreFilenameClear.h"
#include "BackupClientInodeToIDMap.h"
-#include "autogen_BackupProtocolClient.h"
-#include "autogen_ConversionException.h"
-#include "BackupClientCryptoKeys.h"
-#include "BannerText.h"
+#include "BackupClientMakeExcludeList.h"
+#include "BackupDaemon.h"
+#include "BackupDaemonConfigVerify.h"
+#include "BackupStoreConstants.h"
+#include "BackupStoreDirectory.h"
+#include "BackupStoreException.h"
#include "BackupStoreFile.h"
-#include "Random.h"
+#include "BackupStoreFilenameClear.h"
+#include "BannerText.h"
+#include "Conversion.h"
#include "ExcludeList.h"
-#include "BackupClientMakeExcludeList.h"
-#include "IOStreamGetLine.h"
-#include "Utils.h"
#include "FileStream.h"
-#include "BackupStoreException.h"
-#include "BackupStoreConstants.h"
-#include "LocalProcessStream.h"
#include "IOStreamGetLine.h"
-#include "Conversion.h"
-#include "Archive.h"
-#include "Timer.h"
+#include "LocalProcessStream.h"
#include "Logging.h"
-#include "autogen_ClientException.h"
+#include "Random.h"
+#include "Timer.h"
+#include "Utils.h"
#ifdef WIN32
#include "Win32ServiceFunctions.h"
@@ -93,25 +92,6 @@ static const time_t MAX_SLEEP_TIME = 1024;
// This prevents repetitive cycles of load on the server
#define SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY 6
-#ifdef WIN32
-// --------------------------------------------------------------------------
-//
-// Function
-// Name: HelperThread()
-// Purpose: Background thread function, called by Windows,
-// calls the BackupDaemon's RunHelperThread method
-// to listen for and act on control communications
-// Created: 18/2/04
-//
-// --------------------------------------------------------------------------
-unsigned int WINAPI HelperThread(LPVOID lpParam)
-{
- ((BackupDaemon *)lpParam)->RunHelperThread();
-
- return 0;
-}
-#endif
-
// --------------------------------------------------------------------------
//
// Function
@@ -122,9 +102,23 @@ unsigned int WINAPI HelperThread(LPVOID lpParam)
// --------------------------------------------------------------------------
BackupDaemon::BackupDaemon()
: mState(BackupDaemon::State_Initialising),
- mpCommandSocketInfo(0),
+ mDeleteRedundantLocationsAfter(0),
+ mLastNotifiedEvent(SysadminNotifier::MAX),
mDeleteUnusedRootDirEntriesAfter(0),
- mLogAllFileAccess(false)
+ mClientStoreMarker(BackupClientContext::ClientStoreMarker_NotKnown),
+ mStorageLimitExceeded(false),
+ mReadErrorsOnFilesystemObjects(false),
+ mLastSyncTime(0),
+ mNextSyncTime(0),
+ mCurrentSyncStartTime(0),
+ mUpdateStoreInterval(0),
+ mDeleteStoreObjectInfoFile(false),
+ mDoSyncForcedByPreviousSyncError(false),
+ mLogAllFileAccess(false),
+ mpProgressNotifier(this),
+ mpLocationResolver(this),
+ mpRunStatusProvider(this),
+ mpSysadminNotifier(this)
#ifdef WIN32
, mInstallService(false),
mRemoveService(false),
@@ -134,40 +128,6 @@ BackupDaemon::BackupDaemon()
{
// Only ever one instance of a daemon
SSLLib::Initialise();
-
- // Initialise notification sent status
- for(int l = 0; l < NotifyEvent__MAX; ++l)
- {
- mNotificationsSent[l] = false;
- }
-
- #ifdef WIN32
- // Create the event object to signal from main thread to
- // worker when new messages are queued to be sent to the
- // command socket.
-
- mhMessageToSendEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
- if(mhMessageToSendEvent == INVALID_HANDLE_VALUE)
- {
- BOX_ERROR("Failed to create event object: error " <<
- GetLastError());
- exit(1);
- }
-
- // Create the event object to signal from worker to main thread
- // when a command has been received on the command socket.
-
- mhCommandReceivedEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
- if(mhCommandReceivedEvent == INVALID_HANDLE_VALUE)
- {
- BOX_ERROR("Failed to create event object: error " <<
- GetLastError());
- exit(1);
- }
-
- // Create the critical section to protect the message queue
- InitializeCriticalSection(&mMessageQueueLock);
- #endif
}
// --------------------------------------------------------------------------
@@ -182,12 +142,6 @@ BackupDaemon::~BackupDaemon()
{
DeleteAllLocations();
DeleteAllIDMaps();
-
- if(mpCommandSocketInfo != 0)
- {
- delete mpCommandSocketInfo;
- mpCommandSocketInfo = 0;
- }
}
// --------------------------------------------------------------------------
@@ -262,12 +216,12 @@ void BackupDaemon::SetupInInitialProcess()
if(GetConfiguration().KeyExists("CommandSocket"))
{
BOX_WARNING(
- "==============================================================================\n"
- "SECURITY WARNING: This platform cannot check the credentials of connections to\n"
- "the command socket. This is a potential DoS security problem.\n"
- "Remove the CommandSocket directive from the bbackupd.conf file if bbackupctl\n"
- "is not used.\n"
- "==============================================================================\n"
+ "==============================================================================\n"
+ "SECURITY WARNING: This platform cannot check the credentials of connections to\n"
+ "the command socket. This is a potential DoS security problem.\n"
+ "Remove the CommandSocket directive from the bbackupd.conf file if bbackupctl\n"
+ "is not used.\n"
+ "==============================================================================\n"
);
}
}
@@ -294,7 +248,7 @@ void BackupDaemon::DeleteAllLocations()
// Clear the contents of the map, so it is empty
mLocations.clear();
- // And delete everything from the assoicated mount vector
+ // And delete everything from the associated mount vector
mIDMapMounts.clear();
}
@@ -322,6 +276,7 @@ int BackupDaemon::ProcessOption(signed int option)
case 'S':
{
mServiceName = optarg;
+ Logging::SetProgramName(mServiceName);
return 0;
}
@@ -356,8 +311,6 @@ int BackupDaemon::Main(const std::string &rConfigFileName)
return RemoveService(mServiceName);
}
- Logging::SetProgramName("Box Backup (" + mServiceName + ")");
-
int returnCode;
if (mRunAsService)
@@ -377,220 +330,6 @@ int BackupDaemon::Main(const std::string &rConfigFileName)
return returnCode;
}
-
-void BackupDaemon::RunHelperThread(void)
-{
- const Configuration &conf(GetConfiguration());
- mpCommandSocketInfo = new CommandSocketInfo;
- WinNamedPipeStream& rSocket(mpCommandSocketInfo->mListeningSocket);
-
- // loop until the parent process exits, or we decide
- // to kill the thread ourselves
- while (!IsTerminateWanted())
- {
- try
- {
- std::string socket = conf.GetKeyValue("CommandSocket");
- rSocket.Accept(socket);
- }
- catch (BoxException &e)
- {
- BOX_ERROR("Failed to open command socket: " <<
- e.what());
- SetTerminateWanted();
- break; // this is fatal to listening thread
- }
- catch(std::exception &e)
- {
- BOX_ERROR("Failed to open command socket: " <<
- e.what());
- SetTerminateWanted();
- break; // this is fatal to listening thread
- }
- catch(...)
- {
- BOX_ERROR("Failed to open command socket: "
- "unknown error");
- SetTerminateWanted();
- break; // this is fatal to listening thread
- }
-
- try
- {
- // Errors here do not kill the thread,
- // only the current connection.
-
- // This next section comes from Ben's original function
- // Log
- BOX_INFO("Connection from command socket");
-
- // Send a header line summarising the configuration
- // and current state
- char summary[256];
- size_t summarySize = sprintf(summary,
- "bbackupd: %d %d %d %d\nstate %d\n",
- conf.GetKeyValueBool("AutomaticBackup"),
- conf.GetKeyValueInt("UpdateStoreInterval"),
- conf.GetKeyValueInt("MinimumFileAge"),
- conf.GetKeyValueInt("MaxUploadWait"),
- mState);
-
- rSocket.Write(summary, summarySize);
- rSocket.Write("ping\n", 5);
-
- // old queued messages are not useful
- EnterCriticalSection(&mMessageQueueLock);
- mMessageList.clear();
- ResetEvent(mhMessageToSendEvent);
- LeaveCriticalSection(&mMessageQueueLock);
-
- IOStreamGetLine readLine(rSocket);
- std::string command;
-
- while (rSocket.IsConnected() && !IsTerminateWanted())
- {
- HANDLE handles[2];
- handles[0] = mhMessageToSendEvent;
- handles[1] = rSocket.GetReadableEvent();
-
- BOX_TRACE("Received command '" << command
- << "' over command socket");
-
- DWORD result = WaitForMultipleObjects(
- sizeof(handles)/sizeof(*handles),
- handles, FALSE, 1000);
-
- if(result == 0)
- {
- ResetEvent(mhMessageToSendEvent);
-
- EnterCriticalSection(&mMessageQueueLock);
- try
- {
- while (mMessageList.size() > 0)
- {
- std::string message = *(mMessageList.begin());
- mMessageList.erase(mMessageList.begin());
- printf("Sending '%s' to waiting client... ", message.c_str());
- message += "\n";
- rSocket.Write(message.c_str(),
- message.length());
-
- printf("done.\n");
- }
- }
- catch (...)
- {
- LeaveCriticalSection(&mMessageQueueLock);
- throw;
- }
- LeaveCriticalSection(&mMessageQueueLock);
- continue;
- }
- else if(result == WAIT_TIMEOUT)
- {
- continue;
- }
- else if(result != 1)
- {
- BOX_ERROR("WaitForMultipleObjects returned invalid result " << result);
- continue;
- }
-
- if(!readLine.GetLine(command))
- {
- BOX_ERROR("Failed to read line");
- continue;
- }
-
- BOX_INFO("Received command " << command <<
- " from client");
-
- bool sendOK = false;
- bool sendResponse = true;
- bool disconnect = false;
-
- // Command to process!
- if(command == "quit" || command == "")
- {
- // Close the socket.
- disconnect = true;
- sendResponse = false;
- }
- else if(command == "sync")
- {
- // Sync now!
- this->mDoSyncFlagOut = true;
- this->mSyncIsForcedOut = false;
- sendOK = true;
- SetEvent(mhCommandReceivedEvent);
- }
- else if(command == "force-sync")
- {
- // Sync now (forced -- overrides any SyncAllowScript)
- this->mDoSyncFlagOut = true;
- this->mSyncIsForcedOut = true;
- sendOK = true;
- SetEvent(mhCommandReceivedEvent);
- }
- else if(command == "reload")
- {
- // Reload the configuration
- SetReloadConfigWanted();
- sendOK = true;
- SetEvent(mhCommandReceivedEvent);
- }
- else if(command == "terminate")
- {
- // Terminate the daemon cleanly
- SetTerminateWanted();
- sendOK = true;
- SetEvent(mhCommandReceivedEvent);
- }
- else
- {
- BOX_ERROR("Received unknown command "
- "'" << command << "' "
- "from client");
- sendResponse = true;
- sendOK = false;
- }
-
- // Send a response back?
- if(sendResponse)
- {
- const char* response = sendOK ? "ok\n" : "error\n";
- rSocket.Write(
- response, strlen(response));
- }
-
- if(disconnect)
- {
- break;
- }
- }
-
- rSocket.Close();
- }
- catch(BoxException &e)
- {
- BOX_ERROR("Communication error with "
- "control client: " << e.what());
- }
- catch(std::exception &e)
- {
- BOX_ERROR("Internal error in command socket "
- "thread: " << e.what());
- }
- catch(...)
- {
- BOX_ERROR("Communication error with control client");
- }
- }
-
- CloseHandle(mhCommandReceivedEvent);
- CloseHandle(mhMessageToSendEvent);
-}
#endif
// --------------------------------------------------------------------------
@@ -606,36 +345,29 @@ void BackupDaemon::Run()
// initialise global timer mechanism
Timers::Init();
- #ifdef WIN32
- // Create a thread to handle the named pipe
- HANDLE hThread;
- unsigned int dwThreadId;
-
- hThread = (HANDLE) _beginthreadex(
- NULL, // default security attributes
- 0, // use default stack size
- HelperThread, // thread function
- this, // argument to thread function
- 0, // use default creation flags
- &dwThreadId); // returns the thread identifier
- #else
+ #ifndef WIN32
// Ignore SIGPIPE so that if a command connection is broken,
// the daemon doesn't terminate.
::signal(SIGPIPE, SIG_IGN);
+ #endif
- // Create a command socket?
- const Configuration &conf(GetConfiguration());
- if(conf.KeyExists("CommandSocket"))
- {
- // Yes, create a local UNIX socket
- mpCommandSocketInfo = new CommandSocketInfo;
- const char *socketName =
- conf.GetKeyValue("CommandSocket").c_str();
+ // Create a command socket?
+ const Configuration &conf(GetConfiguration());
+ if(conf.KeyExists("CommandSocket"))
+ {
+ // Yes, create a local UNIX socket
+ mapCommandSocketInfo.reset(new CommandSocketInfo);
+ const char *socketName =
+ conf.GetKeyValue("CommandSocket").c_str();
+ #ifdef WIN32
+ mapCommandSocketInfo->mListeningSocket.Listen(
+ socketName);
+ #else
::unlink(socketName);
- mpCommandSocketInfo->mListeningSocket.Listen(
+ mapCommandSocketInfo->mListeningSocket.Listen(
Socket::TypeUNIX, socketName);
- }
- #endif // !WIN32
+ #endif
+ }
// Handle things nicely on exceptions
try
@@ -644,16 +376,11 @@ void BackupDaemon::Run()
}
catch(...)
{
- #ifdef WIN32
- // Don't delete the socket, as the helper thread
- // is probably still using it. Let Windows clean
- // up after us.
- #else
- if(mpCommandSocketInfo != 0)
+ if(mapCommandSocketInfo.get())
{
try
{
- delete mpCommandSocketInfo;
+ mapCommandSocketInfo.reset();
}
catch(std::exception &e)
{
@@ -666,91 +393,63 @@ void BackupDaemon::Run()
BOX_WARNING("Error closing command socket "
"after exception, ignored.");
}
- mpCommandSocketInfo = 0;
}
- #endif // WIN32
Timers::Cleanup();
throw;
}
- #ifndef WIN32
- // Clean up
- if(mpCommandSocketInfo != 0)
- {
- delete mpCommandSocketInfo;
- mpCommandSocketInfo = 0;
- }
- #endif
-
+ // Clean up
+ mapCommandSocketInfo.reset();
Timers::Cleanup();
}
-// --------------------------------------------------------------------------
-//
-// Function
-// Name: BackupDaemon::Run2()
-// Purpose: Run function for daemon (second stage)
-// Created: 2003/10/08
-//
-// --------------------------------------------------------------------------
-void BackupDaemon::Run2()
+void BackupDaemon::InitCrypto()
{
// Read in the certificates creating a TLS context
- TLSContext tlsContext;
const Configuration &conf(GetConfiguration());
std::string certFile(conf.GetKeyValue("CertificateFile"));
std::string keyFile(conf.GetKeyValue("PrivateKeyFile"));
std::string caFile(conf.GetKeyValue("TrustedCAsFile"));
- tlsContext.Initialise(false /* as client */, certFile.c_str(), keyFile.c_str(), caFile.c_str());
+ mTlsContext.Initialise(false /* as client */, certFile.c_str(),
+ keyFile.c_str(), caFile.c_str());
// Set up the keys for various things
BackupClientCryptoKeys_Setup(conf.GetKeyValue("KeysFile").c_str());
+}
- // Setup various timings
- int maximumDiffingTime = 600;
- int keepAliveTime = 60;
+// --------------------------------------------------------------------------
+//
+// Function
+// Name: BackupDaemon::Run2()
+// Purpose: Run function for daemon (second stage)
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+void BackupDaemon::Run2()
+{
+ InitCrypto();
- // max diffing time, keep-alive time
- if(conf.KeyExists("MaximumDiffingTime"))
- {
- maximumDiffingTime = conf.GetKeyValueInt("MaximumDiffingTime");
- }
- if(conf.KeyExists("KeepAliveTime"))
- {
- keepAliveTime = conf.GetKeyValueInt("KeepAliveTime");
- }
+ const Configuration &conf(GetConfiguration());
// How often to connect to the store (approximate)
- box_time_t updateStoreInterval = SecondsToBoxTime(conf.GetKeyValueInt("UpdateStoreInterval"));
+ mUpdateStoreInterval = SecondsToBoxTime(
+ conf.GetKeyValueInt("UpdateStoreInterval"));
// But are we connecting automatically?
bool automaticBackup = conf.GetKeyValueBool("AutomaticBackup");
- // The minimum age a file needs to be before it will be considered for uploading
- box_time_t minimumFileAge = SecondsToBoxTime(conf.GetKeyValueInt("MinimumFileAge"));
-
- // The maximum time we'll wait to upload a file, regardless of how often it's modified
- box_time_t maxUploadWait = SecondsToBoxTime(conf.GetKeyValueInt("MaxUploadWait"));
- // Adjust by subtracting the minimum file age, so is relative to sync period end in comparisons
- maxUploadWait = (maxUploadWait > minimumFileAge)?(maxUploadWait - minimumFileAge):(0);
-
// When the next sync should take place -- which is ASAP
- box_time_t nextSyncTime = 0;
+ mNextSyncTime = 0;
// When the last sync started (only updated if the store was not full when the sync ended)
- box_time_t lastSyncTime = 0;
+ mLastSyncTime = 0;
// --------------------------------------------------------------------------------------------
- // And what's the current client store marker?
- int64_t clientStoreMarker =
- BackupClientContext::ClientStoreMarker_NotKnown;
- // haven't contacted the store yet
-
- bool deleteStoreObjectInfoFile = DeserializeStoreObjectInfo(
- clientStoreMarker, lastSyncTime, nextSyncTime);
+ mDeleteStoreObjectInfoFile = DeserializeStoreObjectInfo(
+ mLastSyncTime, mNextSyncTime);
// --------------------------------------------------------------------------------------------
@@ -758,91 +457,98 @@ void BackupDaemon::Run2()
// Set state
SetState(State_Idle);
+ mDoSyncForcedByPreviousSyncError = false;
+
// Loop around doing backups
do
{
// Flags used below
bool storageLimitExceeded = false;
bool doSync = false;
- bool doSyncForcedByCommand = false;
+ bool mDoSyncForcedByCommand = false;
// Is a delay necessary?
+ box_time_t currentTime;
+
+ do
{
- box_time_t currentTime;
- do
+ // Check whether we should be stopping,
+ // and don't run a sync if so.
+ if(StopRun()) break;
+
+ currentTime = GetCurrentBoxTime();
+
+ // Pause a while, but no more than
+ // MAX_SLEEP_TIME seconds (use the conditional
+ // because times are unsigned)
+ box_time_t requiredDelay =
+ (mNextSyncTime < currentTime)
+ ? (0)
+ : (mNextSyncTime - currentTime);
+
+ // If there isn't automatic backup happening,
+ // set a long delay. And limit delays at the
+ // same time.
+ if(!automaticBackup && !mDoSyncForcedByPreviousSyncError)
{
- // Check whether we should be stopping,
- // and don't run a sync if so.
- if(StopRun()) break;
-
- currentTime = GetCurrentBoxTime();
-
- // Pause a while, but no more than
- // MAX_SLEEP_TIME seconds (use the conditional
- // because times are unsigned)
- box_time_t requiredDelay =
- (nextSyncTime < currentTime)
- ? (0)
- : (nextSyncTime - currentTime);
-
- // If there isn't automatic backup happening,
- // set a long delay. And limit delays at the
- // same time.
- if(!automaticBackup || requiredDelay >
- SecondsToBoxTime(MAX_SLEEP_TIME))
+ requiredDelay = SecondsToBoxTime(MAX_SLEEP_TIME);
+ }
+ else if(requiredDelay > SecondsToBoxTime(MAX_SLEEP_TIME))
+ {
+ requiredDelay = SecondsToBoxTime(MAX_SLEEP_TIME);
+ }
+
+ // Only delay if necessary
+ if(requiredDelay > 0)
+ {
+ // Sleep somehow. There are choices
+ // on how this should be done,
+ // depending on the state of the
+ // control connection
+ if(mapCommandSocketInfo.get() != 0)
{
- requiredDelay = SecondsToBoxTime(
- MAX_SLEEP_TIME);
+ // A command socket exists,
+ // so sleep by waiting on it
+ WaitOnCommandSocket(requiredDelay,
+ doSync, mDoSyncForcedByCommand);
}
-
- // Only delay if necessary
- if(requiredDelay > 0)
+ else
{
- // Sleep somehow. There are choices
- // on how this should be done,
- // depending on the state of the
- // control connection
- if(mpCommandSocketInfo != 0)
- {
- // A command socket exists,
- // so sleep by waiting on it
- WaitOnCommandSocket(
- requiredDelay, doSync,
- doSyncForcedByCommand);
- }
- else
- {
- // No command socket or
- // connection, just do a
- // normal sleep
- time_t sleepSeconds =
- BoxTimeToSeconds(
- requiredDelay);
- ::sleep((sleepSeconds <= 0)
- ? 1
- : sleepSeconds);
- }
+ // No command socket or
+ // connection, just do a
+ // normal sleep
+ time_t sleepSeconds =
+ BoxTimeToSeconds(requiredDelay);
+ ::sleep((sleepSeconds <= 0)
+ ? 1 : sleepSeconds);
}
-
- } while((!automaticBackup || (currentTime < nextSyncTime)) && !doSync && !StopRun());
+ }
+
+ if ((automaticBackup || mDoSyncForcedByPreviousSyncError)
+ && currentTime >= mNextSyncTime)
+ {
+ doSync = true;
+ }
}
+ while(!doSync && !StopRun());
// Time of sync start, and if it's time for another sync
// (and we're doing automatic syncs), set the flag
- box_time_t currentSyncStartTime = GetCurrentBoxTime();
- if(automaticBackup && currentSyncStartTime >= nextSyncTime)
+ mCurrentSyncStartTime = GetCurrentBoxTime();
+ if((automaticBackup || mDoSyncForcedByPreviousSyncError) &&
+ mCurrentSyncStartTime >= mNextSyncTime)
{
doSync = true;
}
// Use a script to see if sync is allowed now?
- if(!doSyncForcedByCommand && doSync && !StopRun())
+ if(!mDoSyncForcedByCommand && doSync && !StopRun())
{
int d = UseScriptToSeeIfSyncAllowed();
if(d > 0)
{
// Script has asked for a delay
- nextSyncTime = GetCurrentBoxTime() +
+ mNextSyncTime = GetCurrentBoxTime() +
SecondsToBoxTime(d);
doSync = false;
}
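
The scheduling loop above clamps the wait before the next sync to MAX_SLEEP_TIME, and substitutes a long capped delay when automatic backups are off and no retry is pending. A small standalone sketch of that delay calculation, with plain 64-bit seconds standing in for box_time_t:

// Standalone sketch of the sleep-delay calculation in the scheduling loop.
#include <cstdint>
#include <iostream>

static const int64_t MAX_SLEEP_TIME = 1024; // seconds, as in the daemon

int64_t RequiredDelay(int64_t nextSyncTime, int64_t currentTime,
	bool automaticBackup, bool forcedByPreviousError)
{
	// Times are unsigned in the real code, hence the conditional rather
	// than a straight subtraction.
	int64_t delay = (nextSyncTime < currentTime)
		? 0 : (nextSyncTime - currentTime);

	if(!automaticBackup && !forcedByPreviousError)
	{
		// No automatic backups: just sleep for the maximum slice.
		delay = MAX_SLEEP_TIME;
	}
	else if(delay > MAX_SLEEP_TIME)
	{
		// Cap the delay so the loop wakes up regularly.
		delay = MAX_SLEEP_TIME;
	}
	return delay;
}

int main()
{
	std::cout << RequiredDelay(5000, 1000, true, false) << "\n"; // 1024
	std::cout << RequiredDelay(1200, 1000, true, false) << "\n"; // 200
	std::cout << RequiredDelay(1200, 1000, false, false) << "\n"; // 1024
	return 0;
}
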
@@ -852,383 +558,448 @@ void BackupDaemon::Run2()
// to be stopping)
if(doSync && !StopRun())
{
- // Touch a file to record times in filesystem
- TouchFileInWorkingDir("last_sync_start");
+ RunSyncNowWithExceptionHandling();
+ }
- // Tell anything connected to the command socket
- SendSyncStartOrFinish(true /* start */);
-
- // Reset statistics on uploads
- BackupStoreFile::ResetStats();
-
- // Calculate the sync period of files to examine
- box_time_t syncPeriodStart = lastSyncTime;
- box_time_t syncPeriodEnd = currentSyncStartTime -
- minimumFileAge;
+ // Set state
+ SetState(storageLimitExceeded?State_StorageLimitExceeded:State_Idle);
- if(syncPeriodStart >= syncPeriodEnd &&
- syncPeriodStart - syncPeriodEnd < minimumFileAge)
- {
- // This can happen if we receive a force-sync
- // command less than minimumFileAge after
- // the last sync. Deal with it by moving back
- // syncPeriodStart, which should not do any
- // damage.
- syncPeriodStart = syncPeriodEnd -
- SecondsToBoxTime(1);
- }
+ } while(!StopRun());
+
+ // Make sure we have a clean start next time round (if restart)
+ DeleteAllLocations();
+ DeleteAllIDMaps();
+}
- if(syncPeriodStart >= syncPeriodEnd)
- {
- BOX_ERROR("Invalid (negative) sync period: "
- "perhaps your clock is going "
- "backwards (" << syncPeriodStart <<
- " to " << syncPeriodEnd << ")");
- THROW_EXCEPTION(ClientException,
- ClockWentBackwards);
- }
+void BackupDaemon::RunSyncNowWithExceptionHandling()
+{
+ OnBackupStart();
- // Check logic
- ASSERT(syncPeriodEnd > syncPeriodStart);
- // Paranoid check on sync times
- if(syncPeriodStart >= syncPeriodEnd) continue;
-
- // Adjust syncPeriodEnd to emulate snapshot
- // behaviour properly
- box_time_t syncPeriodEndExtended = syncPeriodEnd;
- // Using zero min file age?
- if(minimumFileAge == 0)
- {
- // Add a year on to the end of the end time,
- // to make sure we sync files which are
- // modified after the scan run started.
- // Of course, they may be eligible to be
- // synced again the next time round,
- // but this should be OK, because the changes
- // only upload should upload no data.
- syncPeriodEndExtended += SecondsToBoxTime(
- (time_t)(356*24*3600));
- }
+ // Do sync
+ bool errorOccurred = false;
+ int errorCode = 0, errorSubCode = 0;
+ const char* errorString = "unknown";
- // Delete the serialised store object file,
- // so that we don't try to reload it after a
- // partially completed backup
- if(deleteStoreObjectInfoFile &&
- !DeleteStoreObjectInfo())
- {
- BOX_ERROR("Failed to delete the "
- "StoreObjectInfoFile, backup cannot "
- "continue safely.");
- THROW_EXCEPTION(ClientException,
- FailedToDeleteStoreObjectInfoFile);
- }
+ try
+ {
+ RunSyncNow();
+ }
+ catch(BoxException &e)
+ {
+ errorOccurred = true;
+ errorString = e.what();
+ errorCode = e.GetType();
+ errorSubCode = e.GetSubType();
+ }
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error during backup run: " << e.what());
+ errorOccurred = true;
+ errorString = e.what();
+ }
+ catch(...)
+ {
+ // TODO: better handling of exceptions here...
+ // need to be very careful
+ errorOccurred = true;
+ }
- // In case the backup throws an exception,
- // we should not try to delete the store info
- // object file again.
- deleteStoreObjectInfoFile = false;
-
- // Do sync
- bool errorOccurred = false;
- int errorCode = 0, errorSubCode = 0;
- const char* errorString = "unknown";
+ // do not retry immediately without a good reason
+ mDoSyncForcedByPreviousSyncError = false;
+
+ if(errorOccurred)
+ {
+ // Is it a berkely db failure?
+ bool isBerkelyDbFailure = false;
- try
- {
- // Set state and log start
- SetState(State_Connected);
- BOX_NOTICE("Beginning scan of local files");
+ if (errorCode == BackupStoreException::ExceptionType
+ && errorSubCode == BackupStoreException::BerkelyDBFailure)
+ {
+ isBerkelyDbFailure = true;
+ }
- std::string extendedLogFile;
- if (conf.KeyExists("ExtendedLogFile"))
- {
- extendedLogFile = conf.GetKeyValue(
- "ExtendedLogFile");
- }
-
- if (conf.KeyExists("LogAllFileAccess"))
- {
- mLogAllFileAccess =
- conf.GetKeyValueBool(
- "LogAllFileAccess");
- }
-
- // Then create a client context object (don't
- // just connect, as this may be unnecessary)
- BackupClientContext clientContext
- (
- *this,
- tlsContext,
- conf.GetKeyValue("StoreHostname"),
- conf.GetKeyValueInt("AccountNumber"),
- conf.GetKeyValueBool("ExtendedLogging"),
- conf.KeyExists("ExtendedLogFile"),
- extendedLogFile
- );
-
- // Set up the sync parameters
- BackupClientDirectoryRecord::SyncParams params(
- *this, *this, clientContext);
- params.mSyncPeriodStart = syncPeriodStart;
- params.mSyncPeriodEnd = syncPeriodEndExtended;
- // use potentially extended end time
- params.mMaxUploadWait = maxUploadWait;
- params.mFileTrackingSizeThreshold =
- conf.GetKeyValueInt(
- "FileTrackingSizeThreshold");
- params.mDiffingUploadSizeThreshold =
- conf.GetKeyValueInt(
- "DiffingUploadSizeThreshold");
- params.mMaxFileTimeInFuture =
- SecondsToBoxTime(
- conf.GetKeyValueInt(
- "MaxFileTimeInFuture"));
- mDeleteRedundantLocationsAfter =
- conf.GetKeyValueInt(
- "DeleteRedundantLocationsAfter");
-
- clientContext.SetMaximumDiffingTime(maximumDiffingTime);
- clientContext.SetKeepAliveTime(keepAliveTime);
-
- // Set store marker
- clientContext.SetClientStoreMarker(clientStoreMarker);
-
- // Set up the locations, if necessary --
- // need to do it here so we have a
- // (potential) connection to use
- if(mLocations.empty())
- {
- const Configuration &locations(
- conf.GetSubConfiguration(
- "BackupLocations"));
-
- // Make sure all the directory records
- // are set up
- SetupLocations(clientContext, locations);
- }
-
- // Get some ID maps going
- SetupIDMapsForSync();
-
- // Delete any unused directories?
- DeleteUnusedRootDirEntries(clientContext);
-
- // Notify administrator
- NotifySysadmin(NotifyEvent_BackupStart);
-
- // Go through the records, syncing them
- for(std::vector<Location *>::const_iterator
- i(mLocations.begin());
- i != mLocations.end(); ++i)
- {
- // Set current and new ID map pointers
- // in the context
- clientContext.SetIDMaps(mCurrentIDMaps[(*i)->mIDMapIndex], mNewIDMaps[(*i)->mIDMapIndex]);
-
- // Set exclude lists (context doesn't
- // take ownership)
- clientContext.SetExcludeLists(
- (*i)->mpExcludeFiles,
- (*i)->mpExcludeDirs);
-
- // Sync the directory
- (*i)->mpDirectoryRecord->SyncDirectory(
- params,
- BackupProtocolClientListDirectory::RootDirectory,
- (*i)->mPath);
+ if(isBerkelyDbFailure)
+ {
+ // Delete corrupt files
+ DeleteCorruptBerkelyDbFiles();
+ }
- // Unset exclude lists (just in case)
- clientContext.SetExcludeLists(0, 0);
- }
-
- // Errors reading any files?
- if(params.mReadErrorsOnFilesystemObjects)
- {
- // Notify administrator
- NotifySysadmin(NotifyEvent_ReadError);
- }
- else
- {
-				// Unset the read error flag, so the
-				// error is reported again if it
-				// happens again
- mNotificationsSent[NotifyEvent_ReadError] = false;
- }
-
- // Perform any deletions required -- these are
- // delayed until the end to allow renaming to
- // happen neatly.
- clientContext.PerformDeletions();
+ // Clear state data
+ // Go back to beginning of time
+ mLastSyncTime = 0;
+ mClientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown; // no store marker, so download everything
+ DeleteAllLocations();
+ DeleteAllIDMaps();
- // Close any open connection
- clientContext.CloseAnyOpenConnection();
-
- // Get the new store marker
- clientStoreMarker = clientContext.GetClientStoreMarker();
-
- // Check the storage limit
- if(clientContext.StorageLimitExceeded())
- {
- // Tell the sysadmin about this
- NotifySysadmin(NotifyEvent_StoreFull);
- }
- else
- {
- // The start time of the next run is
- // the end time of this run.
- // This is only done if the storage
- // limit wasn't exceeded (as things
- // won't have been done properly if
- // it was)
- lastSyncTime = syncPeriodEnd;
-
- // unflag the storage full notify flag
- // so that next time the store is full,
- // an alert will be sent
- mNotificationsSent[NotifyEvent_StoreFull] = false;
- }
-
- // Calculate when the next sync run should be
- nextSyncTime = currentSyncStartTime +
- updateStoreInterval +
- Random::RandomInt(updateStoreInterval >>
+ // Handle restart?
+ if(StopRun())
+ {
+ BOX_NOTICE("Exception (" << errorCode
+ << "/" << errorSubCode
+ << ") due to signal");
+ OnBackupFinish();
+ return;
+ }
+
+ NotifySysadmin(SysadminNotifier::BackupError);
+
+ // If the Berkely db files get corrupted,
+ // delete them and try again immediately.
+ if(isBerkelyDbFailure)
+ {
+ BOX_ERROR("Berkely db inode map files corrupted, "
+ "deleting and restarting scan. Renamed files "
+ "and directories will not be tracked until "
+ "after this scan.");
+ ::sleep(1);
+ }
+ else
+ {
+ // Not restart/terminate, pause and retry
+ // Notify administrator
+ SetState(State_Error);
+ BOX_ERROR("Exception caught (" << errorString <<
+ " " << errorCode << "/" << errorSubCode <<
+ "), reset state and waiting to retry...");
+ ::sleep(10);
+ mNextSyncTime = mCurrentSyncStartTime +
+ SecondsToBoxTime(100) +
+ Random::RandomInt(mUpdateStoreInterval >>
SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
-
- // Commit the ID Maps
- CommitIDMapsAfterSync();
+ }
+ }
+ // Notify system administrator about the final state of the backup
+ else if(mReadErrorsOnFilesystemObjects)
+ {
+ NotifySysadmin(SysadminNotifier::ReadError);
+ }
+ else if(mStorageLimitExceeded)
+ {
+ NotifySysadmin(SysadminNotifier::StoreFull);
+ }
+ else
+ {
+ NotifySysadmin(SysadminNotifier::BackupOK);
+ }
+
+ // If we were retrying after an error, and this backup succeeded,
+ // then now would be a good time to stop :-)
+ mDoSyncForcedByPreviousSyncError = errorOccurred;
- // Log
- BOX_NOTICE("Finished scan of local files");
+ OnBackupFinish();
+}
- // Notify administrator
- NotifySysadmin(NotifyEvent_BackupFinish);
+void BackupDaemon::RunSyncNow()
+{
+ // Delete the serialised store object file,
+ // so that we don't try to reload it after a
+ // partially completed backup
+ if(mDeleteStoreObjectInfoFile &&
+ !DeleteStoreObjectInfo())
+ {
+ BOX_ERROR("Failed to delete the StoreObjectInfoFile, "
+ "backup cannot continue safely.");
+ THROW_EXCEPTION(ClientException,
+ FailedToDeleteStoreObjectInfoFile);
+ }
- // --------------------------------------------------------------------------------------------
+ // In case the backup throws an exception,
+ // we should not try to delete the store info
+ // object file again.
+ mDeleteStoreObjectInfoFile = false;
- // We had a successful backup, save the store
- // info. If we save successfully, we must
- // delete the file next time we start a backup
+ const Configuration &conf(GetConfiguration());
- deleteStoreObjectInfoFile =
- SerializeStoreObjectInfo(
- clientStoreMarker,
- lastSyncTime, nextSyncTime);
+ std::auto_ptr<FileLogger> fileLogger;
- // --------------------------------------------------------------------------------------------
- }
- catch(BoxException &e)
- {
- errorOccurred = true;
- errorString = e.what();
- errorCode = e.GetType();
- errorSubCode = e.GetSubType();
- }
- catch(std::exception &e)
- {
- BOX_ERROR("Internal error during "
- "backup run: " << e.what());
- errorOccurred = true;
- errorString = e.what();
- }
- catch(...)
- {
- // TODO: better handling of exceptions here...
- // need to be very careful
- errorOccurred = true;
- }
-
- if(errorOccurred)
- {
- // Is it a berkely db failure?
- bool isBerkelyDbFailure = false;
+ if (conf.KeyExists("LogFile"))
+ {
+ Log::Level level = Log::INFO;
+ if (conf.KeyExists("LogFileLevel"))
+ {
+ level = Logging::GetNamedLevel(
+ conf.GetKeyValue("LogFileLevel"));
+ }
+ fileLogger.reset(new FileLogger(conf.GetKeyValue("LogFile"),
+ level));
+ }
- if (errorCode == BackupStoreException::ExceptionType
- && errorSubCode == BackupStoreException::BerkelyDBFailure)
- {
- isBerkelyDbFailure = true;
- }
+ std::string extendedLogFile;
+ if (conf.KeyExists("ExtendedLogFile"))
+ {
+ extendedLogFile = conf.GetKeyValue("ExtendedLogFile");
+ }
+
+ if (conf.KeyExists("LogAllFileAccess"))
+ {
+ mLogAllFileAccess = conf.GetKeyValueBool("LogAllFileAccess");
+ }
+
+ // Then create a client context object (don't
+ // just connect, as this may be unnecessary)
+ BackupClientContext clientContext
+ (
+ *mpLocationResolver,
+ mTlsContext,
+ conf.GetKeyValue("StoreHostname"),
+ conf.GetKeyValueInt("StorePort"),
+ conf.GetKeyValueInt("AccountNumber"),
+ conf.GetKeyValueBool("ExtendedLogging"),
+ conf.KeyExists("ExtendedLogFile"),
+ extendedLogFile, *mpProgressNotifier
+ );
+
+ // The minimum age a file needs to be before it will be
+ // considered for uploading
+ box_time_t minimumFileAge = SecondsToBoxTime(
+ conf.GetKeyValueInt("MinimumFileAge"));
- if(isBerkelyDbFailure)
- {
- // Delete corrupt files
- DeleteCorruptBerkelyDbFiles();
- }
+ // The maximum time we'll wait to upload a file, regardless
+ // of how often it's modified
+ box_time_t maxUploadWait = SecondsToBoxTime(
+ conf.GetKeyValueInt("MaxUploadWait"));
+ // Adjust by subtracting the minimum file age, so is relative
+ // to sync period end in comparisons
+ if (maxUploadWait > minimumFileAge)
+ {
+ maxUploadWait -= minimumFileAge;
+ }
+ else
+ {
+ maxUploadWait = 0;
+ }
- // Clear state data
- syncPeriodStart = 0;
- // go back to beginning of time
- clientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown; // no store marker, so download everything
- DeleteAllLocations();
- DeleteAllIDMaps();
+ // Calculate the sync period of files to examine
+ box_time_t syncPeriodStart = mLastSyncTime;
+ box_time_t syncPeriodEnd = GetCurrentBoxTime() - minimumFileAge;
- // Handle restart?
- if(StopRun())
- {
- BOX_NOTICE("Exception (" << errorCode
- << "/" << errorSubCode
- << ") due to signal");
- return;
- }
+ if(syncPeriodStart >= syncPeriodEnd &&
+ syncPeriodStart - syncPeriodEnd < minimumFileAge)
+ {
+ // This can happen if we receive a force-sync command less
+ // than minimumFileAge after the last sync. Deal with it by
+ // moving back syncPeriodStart, which should not do any
+ // damage.
+ syncPeriodStart = syncPeriodEnd -
+ SecondsToBoxTime(1);
+ }
- // If the Berkely db files get corrupted, delete them and try again immediately
- if(isBerkelyDbFailure)
- {
- BOX_ERROR("Berkely db inode map files corrupted, deleting and restarting scan. Renamed files and directories will not be tracked until after this scan.");
- ::sleep(1);
- }
- else
- {
- // Not restart/terminate, pause and retry
- // Notify administrator
- NotifySysadmin(NotifyEvent_BackupError);
- SetState(State_Error);
- BOX_ERROR("Exception caught ("
- << errorString
- << " " << errorCode
- << "/" << errorSubCode
- << "), reset state and "
- "waiting to retry...");
- ::sleep(10);
- nextSyncTime = currentSyncStartTime +
- SecondsToBoxTime(90) +
- Random::RandomInt(
- updateStoreInterval >>
- SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
- }
- }
+ if(syncPeriodStart >= syncPeriodEnd)
+ {
+ BOX_ERROR("Invalid (negative) sync period: "
+ "perhaps your clock is going "
+ "backwards (" << syncPeriodStart <<
+ " to " << syncPeriodEnd << ")");
+ THROW_EXCEPTION(ClientException,
+ ClockWentBackwards);
+ }
- // Log the stats
- BOX_NOTICE("File statistics: total file size uploaded "
- << BackupStoreFile::msStats.mBytesInEncodedFiles
- << ", bytes already on server "
- << BackupStoreFile::msStats.mBytesAlreadyOnServer
- << ", encoded size "
- << BackupStoreFile::msStats.mTotalFileStreamSize);
- BackupStoreFile::ResetStats();
+ // Check logic
+ ASSERT(syncPeriodEnd > syncPeriodStart);
+ // Paranoid check on sync times
+ if(syncPeriodStart >= syncPeriodEnd) return;
+
+ // Adjust syncPeriodEnd to emulate snapshot
+ // behaviour properly
+ box_time_t syncPeriodEndExtended = syncPeriodEnd;
+
+ // Using zero min file age?
+ if(minimumFileAge == 0)
+ {
+ // Add a year on to the end of the end time,
+ // to make sure we sync files which are
+ // modified after the scan run started.
+ // Of course, they may be eligible to be
+ // synced again the next time round,
+ // but this should be OK, because the changes
+ // only upload should upload no data.
+ syncPeriodEndExtended += SecondsToBoxTime(
+ (time_t)(356*24*3600));
+ }
+
+ // Set up the sync parameters
+ BackupClientDirectoryRecord::SyncParams params(*mpRunStatusProvider,
+ *mpSysadminNotifier, *mpProgressNotifier, clientContext);
+ params.mSyncPeriodStart = syncPeriodStart;
+ params.mSyncPeriodEnd = syncPeriodEndExtended;
+ // use potentially extended end time
+ params.mMaxUploadWait = maxUploadWait;
+ params.mFileTrackingSizeThreshold =
+ conf.GetKeyValueInt("FileTrackingSizeThreshold");
+ params.mDiffingUploadSizeThreshold =
+ conf.GetKeyValueInt("DiffingUploadSizeThreshold");
+ params.mMaxFileTimeInFuture =
+ SecondsToBoxTime(conf.GetKeyValueInt("MaxFileTimeInFuture"));
+ mDeleteRedundantLocationsAfter =
+ conf.GetKeyValueInt("DeleteRedundantLocationsAfter");
+ mStorageLimitExceeded = false;
+ mReadErrorsOnFilesystemObjects = false;
- // Tell anything connected to the command socket
- SendSyncStartOrFinish(false /* finish */);
+ // Setup various timings
+ int maximumDiffingTime = 600;
+ int keepAliveTime = 60;
- // Touch a file to record times in filesystem
- TouchFileInWorkingDir("last_sync_finish");
- }
+ // max diffing time, keep-alive time
+ if(conf.KeyExists("MaximumDiffingTime"))
+ {
+ maximumDiffingTime = conf.GetKeyValueInt("MaximumDiffingTime");
+ }
+ if(conf.KeyExists("KeepAliveTime"))
+ {
+ keepAliveTime = conf.GetKeyValueInt("KeepAliveTime");
+ }
+
+ clientContext.SetMaximumDiffingTime(maximumDiffingTime);
+ clientContext.SetKeepAliveTime(keepAliveTime);
+
+ // Set store marker
+ clientContext.SetClientStoreMarker(mClientStoreMarker);
+
+ // Set up the locations, if necessary --
+ // need to do it here so we have a
+ // (potential) connection to use
+ if(mLocations.empty())
+ {
+ const Configuration &locations(
+ conf.GetSubConfiguration(
+ "BackupLocations"));
- // Set state
- SetState(storageLimitExceeded?State_StorageLimitExceeded:State_Idle);
+ // Make sure all the directory records
+ // are set up
+ SetupLocations(clientContext, locations);
+ }
+
+ mpProgressNotifier->NotifyIDMapsSetup(clientContext);
+
+ // Get some ID maps going
+ SetupIDMapsForSync();
- } while(!StopRun());
+ // Delete any unused directories?
+ DeleteUnusedRootDirEntries(clientContext);
+
+ // Go through the records, syncing them
+ for(std::vector<Location *>::const_iterator
+ i(mLocations.begin());
+ i != mLocations.end(); ++i)
+ {
+ // Set current and new ID map pointers
+ // in the context
+ clientContext.SetIDMaps(mCurrentIDMaps[(*i)->mIDMapIndex],
+ mNewIDMaps[(*i)->mIDMapIndex]);
- // Make sure we have a clean start next time round (if restart)
- DeleteAllLocations();
- DeleteAllIDMaps();
+ // Set exclude lists (context doesn't
+ // take ownership)
+ clientContext.SetExcludeLists(
+ (*i)->mpExcludeFiles,
+ (*i)->mpExcludeDirs);
+
+ // Sync the directory
+ (*i)->mpDirectoryRecord->SyncDirectory(
+ params,
+ BackupProtocolClientListDirectory::RootDirectory,
+ (*i)->mPath, std::string("/") + (*i)->mName);
+
+ // Unset exclude lists (just in case)
+ clientContext.SetExcludeLists(0, 0);
+ }
+
+ // Perform any deletions required -- these are
+ // delayed until the end to allow renaming to
+ // happen neatly.
+ clientContext.PerformDeletions();
+
+ // Close any open connection
+ clientContext.CloseAnyOpenConnection();
+
+ // Get the new store marker
+ mClientStoreMarker = clientContext.GetClientStoreMarker();
+ mStorageLimitExceeded = clientContext.StorageLimitExceeded();
+ mReadErrorsOnFilesystemObjects =
+ params.mReadErrorsOnFilesystemObjects;
+
+ if(!mStorageLimitExceeded)
+ {
+ // The start time of the next run is the end time of this
+ // run. This is only done if the storage limit wasn't
+ // exceeded (as things won't have been done properly if
+ // it was)
+ mLastSyncTime = syncPeriodEnd;
+ }
+
+ // Commit the ID Maps
+ CommitIDMapsAfterSync();
+
+ // Calculate when the next sync run should be
+ mNextSyncTime = mCurrentSyncStartTime +
+ mUpdateStoreInterval +
+ Random::RandomInt(mUpdateStoreInterval >>
+ SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
+
+ // --------------------------------------------------------------------------------------------
+
+ // We had a successful backup, save the store
+ // info. If we save successfully, we must
+ // delete the file next time we start a backup
+
+ mDeleteStoreObjectInfoFile =
+ SerializeStoreObjectInfo(mLastSyncTime,
+ mNextSyncTime);
+
+ // --------------------------------------------------------------------------------------------
}
+void BackupDaemon::OnBackupStart()
+{
+ // Touch a file to record times in filesystem
+ TouchFileInWorkingDir("last_sync_start");
+
+ // Reset statistics on uploads
+ BackupStoreFile::ResetStats();
+
+ // Tell anything connected to the command socket
+ SendSyncStartOrFinish(true /* start */);
+
+ // Notify administrator
+ NotifySysadmin(SysadminNotifier::BackupStart);
+
+ // Set state and log start
+ SetState(State_Connected);
+ BOX_NOTICE("Beginning scan of local files");
+}
+
+void BackupDaemon::OnBackupFinish()
+{
+ // Log
+ BOX_NOTICE("Finished scan of local files");
+
+ // Log the stats
+ BOX_NOTICE("File statistics: total file size uploaded "
+ << BackupStoreFile::msStats.mBytesInEncodedFiles
+ << ", bytes already on server "
+ << BackupStoreFile::msStats.mBytesAlreadyOnServer
+ << ", encoded size "
+ << BackupStoreFile::msStats.mTotalFileStreamSize);
+
+ // Reset statistics again
+ BackupStoreFile::ResetStats();
+
+ // Notify administrator
+ NotifySysadmin(SysadminNotifier::BackupFinish);
+
+ // Tell anything connected to the command socket
+ SendSyncStartOrFinish(false /* finish */);
+
+ // Touch a file to record times in filesystem
+ TouchFileInWorkingDir("last_sync_finish");
+}
// --------------------------------------------------------------------------
//
// Function
// Name: BackupDaemon::UseScriptToSeeIfSyncAllowed()
-// Purpose: Private. Use a script to see if the sync should be allowed (if configured)
-// Returns -1 if it's allowed, time in seconds to wait otherwise.
+// Purpose: Private. Use a script to see if the sync should be
+// allowed now (if configured). Returns -1 if it's
+// allowed, time in seconds to wait otherwise.
// Created: 21/6/04
//
// --------------------------------------------------------------------------
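
RunSyncNow() above derives the scan window from the last sync time and MinimumFileAge, nudges the window start back if a force-sync arrives too soon after the previous run, and schedules the next run one UpdateStoreInterval ahead plus a random extra of up to 1/64 of the interval. A standalone sketch of that arithmetic follows; plain seconds stand in for box_time_t and std::rand() for the Random class, so the numbers are illustrative only.

// Standalone sketch of the sync-window and rescheduling arithmetic.
#include <cstdint>
#include <cstdlib>
#include <iostream>

static const int SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY = 6;

struct SyncSchedule
{
	int64_t syncPeriodStart;
	int64_t syncPeriodEnd;
	int64_t nextSyncTime;
};

SyncSchedule ComputeSchedule(int64_t lastSyncTime, int64_t now,
	int64_t minimumFileAge, int64_t updateStoreInterval)
{
	SyncSchedule s;
	// The scan covers files modified between the last sync and
	// (now - MinimumFileAge).
	s.syncPeriodStart = lastSyncTime;
	s.syncPeriodEnd = now - minimumFileAge;

	// A force-sync shortly after the previous run can leave an empty or
	// inverted window; nudge the start back so the window stays valid.
	if(s.syncPeriodStart >= s.syncPeriodEnd &&
		s.syncPeriodStart - s.syncPeriodEnd < minimumFileAge)
	{
		s.syncPeriodStart = s.syncPeriodEnd - 1;
	}

	// Random extra time spreads clients out so they don't all reconnect
	// to the store at the same moment.
	int64_t jitterRange = updateStoreInterval >>
		SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY;
	int64_t jitter = (jitterRange > 0) ? (std::rand() % jitterRange) : 0;
	s.nextSyncTime = now + updateStoreInterval + jitter;
	return s;
}

int main()
{
	SyncSchedule s = ComputeSchedule(1000, 5000, 600 /* 10 min age */,
		3600 /* hourly */);
	std::cout << "scan " << s.syncPeriodStart << ".." << s.syncPeriodEnd
		<< ", next sync at " << s.nextSyncTime << "\n";
	return 0;
}
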
@@ -1250,7 +1021,8 @@ int BackupDaemon::UseScriptToSeeIfSyncAllowed()
pid_t pid = 0;
try
{
- std::auto_ptr<IOStream> pscript(LocalProcessStream(conf.GetKeyValue("SyncAllowScript").c_str(), pid));
+ std::auto_ptr<IOStream> pscript(LocalProcessStream(
+ conf.GetKeyValue("SyncAllowScript").c_str(), pid));
// Read in the result
IOStreamGetLine getLine(*pscript);
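
UseScriptToSeeIfSyncAllowed() runs the configured SyncAllowScript and reads its first output line to decide whether to delay the sync. The parsing itself is outside the visible hunk; the sketch below assumes the usual convention that the script prints either "now" or a number of seconds to wait, and uses popen() for brevity where the daemon uses LocalProcessStream and IOStreamGetLine.

// Standalone sketch of the SyncAllowScript convention (assumed, see above).
#include <cstdio>
#include <cstdlib>
#include <string>

// Returns -1 if the sync is allowed now, otherwise seconds to wait.
int SecondsToWait(const char *scriptCommand)
{
	FILE *pipe = popen(scriptCommand, "r");
	if(pipe == NULL)
	{
		return -1; // can't run the script: don't block the backup
	}

	char line[256];
	int waitSeconds = -1;
	if(fgets(line, sizeof(line), pipe) != NULL)
	{
		std::string word(line);
		// Trim trailing newline characters
		while(!word.empty() && (word[word.size() - 1] == '\n' ||
			word[word.size() - 1] == '\r'))
		{
			word.erase(word.size() - 1);
		}
		if(word != "now" && !word.empty())
		{
			waitSeconds = atoi(word.c_str());
		}
	}
	pclose(pipe);
	return waitSeconds;
}

int main()
{
	// Example: a script that always asks for a 5 minute delay.
	int wait = SecondsToWait("echo 300");
	std::printf("wait %d seconds\n", wait);
	return 0;
}
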
@@ -1323,33 +1095,13 @@ int BackupDaemon::UseScriptToSeeIfSyncAllowed()
// --------------------------------------------------------------------------
void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFlagOut, bool &SyncIsForcedOut)
{
-#ifdef WIN32
- DWORD requiredDelayMs = BoxTimeToMilliSeconds(RequiredDelay);
-
- DWORD result = WaitForSingleObject(mhCommandReceivedEvent,
- (DWORD)requiredDelayMs);
-
- if(result == WAIT_OBJECT_0)
- {
- DoSyncFlagOut = this->mDoSyncFlagOut;
- SyncIsForcedOut = this->mSyncIsForcedOut;
- ResetEvent(mhCommandReceivedEvent);
- }
- else if(result == WAIT_TIMEOUT)
+ ASSERT(mapCommandSocketInfo.get());
+ if(!mapCommandSocketInfo.get())
{
- DoSyncFlagOut = false;
- SyncIsForcedOut = false;
- }
- else
- {
- BOX_ERROR("Unexpected result from WaitForSingleObject: "
- "error " << GetLastError());
+ // failure case isn't too bad
+ ::sleep(1);
+ return;
}
-
- return;
-#else // ! WIN32
- ASSERT(mpCommandSocketInfo != 0);
- if(mpCommandSocketInfo == 0) {::sleep(1); return;} // failure case isn't too bad
BOX_TRACE("Wait on command socket, delay = " << RequiredDelay);
@@ -1362,12 +1114,12 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
if(timeout == INFTIM) timeout = 100000;
// Wait for socket connection, or handle a command?
- if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+ if(mapCommandSocketInfo->mpConnectedSocket.get() == 0)
{
// No connection, listen for a new one
- mpCommandSocketInfo->mpConnectedSocket.reset(mpCommandSocketInfo->mListeningSocket.Accept(timeout).release());
+ mapCommandSocketInfo->mpConnectedSocket.reset(mapCommandSocketInfo->mListeningSocket.Accept(timeout).release());
- if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+ if(mapCommandSocketInfo->mpConnectedSocket.get() == 0)
{
// If a connection didn't arrive, there was a timeout, which means we've
// waited long enough and it's time to go.
@@ -1386,7 +1138,7 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
{
uid_t remoteEUID = 0xffff;
gid_t remoteEGID = 0xffff;
- if(mpCommandSocketInfo->mpConnectedSocket->GetPeerCredentials(remoteEUID, remoteEGID))
+ if(mapCommandSocketInfo->mpConnectedSocket->GetPeerCredentials(remoteEUID, remoteEGID))
{
// Credentials are available -- check UID
if(remoteEUID == ::getuid())
@@ -1403,7 +1155,7 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
{
// Dump the connection
BOX_ERROR("Incoming command connection from peer had different user ID than this process, or security check could not be completed.");
- mpCommandSocketInfo->mpConnectedSocket.reset();
+ mapCommandSocketInfo->mpConnectedSocket.reset();
return;
}
else
@@ -1420,7 +1172,7 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
conf.GetKeyValueInt("MinimumFileAge"),
conf.GetKeyValueInt("MaxUploadWait"),
mState);
- mpCommandSocketInfo->mpConnectedSocket->Write(summary, summarySize);
+ mapCommandSocketInfo->mpConnectedSocket->Write(summary, summarySize);
// Set the timeout to something very small, so we don't wait too long on waiting
// for any incoming data
@@ -1430,22 +1182,22 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
}
// So there must be a connection now.
- ASSERT(mpCommandSocketInfo->mpConnectedSocket.get() != 0);
+ ASSERT(mapCommandSocketInfo->mpConnectedSocket.get() != 0);
// Is there a getline object ready?
- if(mpCommandSocketInfo->mpGetLine == 0)
+ if(mapCommandSocketInfo->mpGetLine == 0)
{
// Create a new one
- mpCommandSocketInfo->mpGetLine = new IOStreamGetLine(*(mpCommandSocketInfo->mpConnectedSocket.get()));
+ mapCommandSocketInfo->mpGetLine = new IOStreamGetLine(*(mapCommandSocketInfo->mpConnectedSocket.get()));
}
// Ping the remote side, to provide errors which will mean the socket gets closed
- mpCommandSocketInfo->mpConnectedSocket->Write("ping\n", 5);
+ mapCommandSocketInfo->mpConnectedSocket->Write("ping\n", 5);
// Wait for a command or something on the socket
std::string command;
- while(mpCommandSocketInfo->mpGetLine != 0 && !mpCommandSocketInfo->mpGetLine->IsEOF()
- && mpCommandSocketInfo->mpGetLine->GetLine(command, false /* no preprocessing */, timeout))
+ while(mapCommandSocketInfo->mpGetLine != 0 && !mapCommandSocketInfo->mpGetLine->IsEOF()
+ && mapCommandSocketInfo->mpGetLine->GetLine(command, false /* no preprocessing */, timeout))
{
BOX_TRACE("Receiving command '" << command
<< "' over command socket");
@@ -1490,7 +1242,7 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
// Send a response back?
if(sendResponse)
{
- mpCommandSocketInfo->mpConnectedSocket->Write(sendOK?"ok\n":"error\n", sendOK?3:6);
+ mapCommandSocketInfo->mpConnectedSocket->Write(sendOK?"ok\n":"error\n", sendOK?3:6);
}
// Set timeout to something very small, so this just checks for data which is waiting
@@ -1498,18 +1250,39 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
}
// Close on EOF?
- if(mpCommandSocketInfo->mpGetLine != 0 && mpCommandSocketInfo->mpGetLine->IsEOF())
+ if(mapCommandSocketInfo->mpGetLine != 0 && mapCommandSocketInfo->mpGetLine->IsEOF())
{
CloseCommandConnection();
}
}
+ catch(ConnectionException &ce)
+ {
+ BOX_NOTICE("Failed to write to command socket: " << ce.what());
+
+ // If an error occurs, and there is a connection active,
+ // just close that connection and continue. Otherwise,
+ // let the error propagate.
+
+ if(mapCommandSocketInfo->mpConnectedSocket.get() == 0)
+ {
+ throw; // thread will die
+ }
+ else
+ {
+ // Close socket and ignore error
+ CloseCommandConnection();
+ }
+ }
catch(std::exception &e)
{
- BOX_ERROR("Internal error in command socket thread: "
- << e.what());
- // If an error occurs, and there is a connection active, just close that
- // connection and continue. Otherwise, let the error propagate.
- if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+ BOX_ERROR("Failed to write to command socket: " <<
+ e.what());
+
+ // If an error occurs, and there is a connection active,
+ // just close that connection and continue. Otherwise,
+ // let the error propagate.
+
+ if(mapCommandSocketInfo->mpConnectedSocket.get() == 0)
{
throw; // thread will die
}
@@ -1521,9 +1294,13 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
}
catch(...)
{
- // If an error occurs, and there is a connection active, just close that
- // connection and continue. Otherwise, let the error propagate.
- if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+ BOX_ERROR("Failed to write to command socket: unknown error");
+
+ // If an error occurs, and there is a connection active,
+ // just close that connection and continue. Otherwise,
+ // let the error propagate.
+
+ if(mapCommandSocketInfo->mpConnectedSocket.get() == 0)
{
throw; // thread will die
}
@@ -1533,7 +1310,6 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
CloseCommandConnection();
}
}
-#endif // WIN32
}
@@ -1547,17 +1323,16 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
// --------------------------------------------------------------------------
void BackupDaemon::CloseCommandConnection()
{
-#ifndef WIN32
try
{
BOX_TRACE("Closing command connection");
- if(mpCommandSocketInfo->mpGetLine)
+ if(mapCommandSocketInfo->mpGetLine)
{
- delete mpCommandSocketInfo->mpGetLine;
- mpCommandSocketInfo->mpGetLine = 0;
+ delete mapCommandSocketInfo->mpGetLine;
+ mapCommandSocketInfo->mpGetLine = 0;
}
- mpCommandSocketInfo->mpConnectedSocket.reset();
+ mapCommandSocketInfo->mpConnectedSocket.reset();
}
catch(std::exception &e)
{
@@ -1568,7 +1343,6 @@ void BackupDaemon::CloseCommandConnection()
{
// Ignore any errors
}
-#endif
}
@@ -1586,27 +1360,15 @@ void BackupDaemon::SendSyncStartOrFinish(bool SendStart)
// The bbackupctl program can't rely on a state change, because it
// may never change if the server doesn't need to be contacted.
- if(mpCommandSocketInfo != NULL &&
-#ifdef WIN32
- mpCommandSocketInfo->mListeningSocket.IsConnected()
-#else
- mpCommandSocketInfo->mpConnectedSocket.get() != 0
-#endif
- )
+ if(mapCommandSocketInfo.get() &&
+ mapCommandSocketInfo->mpConnectedSocket.get() != 0)
{
std::string message = SendStart ? "start-sync" : "finish-sync";
try
{
-#ifdef WIN32
- EnterCriticalSection(&mMessageQueueLock);
- mMessageList.push_back(message);
- SetEvent(mhMessageToSendEvent);
- LeaveCriticalSection(&mMessageQueueLock);
-#else
message += "\n";
- mpCommandSocketInfo->mpConnectedSocket->Write(
+ mapCommandSocketInfo->mpConnectedSocket->Write(
message.c_str(), message.size());
-#endif
}
catch(std::exception &e)
{
@@ -1668,14 +1430,20 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
// Just a check to make sure it's right.
DeleteAllLocations();
- // Going to need a copy of the root directory. Get a connection, and fetch it.
+ // Going to need a copy of the root directory. Get a connection,
+ // and fetch it.
BackupProtocolClient &connection(rClientContext.GetConnection());
- // Ask server for a list of everything in the root directory, which is a directory itself
- std::auto_ptr<BackupProtocolClientSuccess> dirreply(connection.QueryListDirectory(
+ // Ask server for a list of everything in the root directory,
+ // which is a directory itself
+ std::auto_ptr<BackupProtocolClientSuccess> dirreply(
+ connection.QueryListDirectory(
BackupProtocolClientListDirectory::RootDirectory,
- BackupProtocolClientListDirectory::Flags_Dir, // only directories
- BackupProtocolClientListDirectory::Flags_Deleted | BackupProtocolClientListDirectory::Flags_OldVersion, // exclude old/deleted stuff
+ // only directories
+ BackupProtocolClientListDirectory::Flags_Dir,
+ // exclude old/deleted stuff
+ BackupProtocolClientListDirectory::Flags_Deleted |
+ BackupProtocolClientListDirectory::Flags_OldVersion,
false /* no attributes */));
// Retrieve the directory from the stream following
@@ -1755,33 +1523,41 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
#endif // HAVE_STRUCT_MNTENT_MNT_DIR
// Check sorting and that things are as we expect
ASSERT(mountPoints.size() > 0);
-#ifndef NDEBUG
+#ifndef BOX_RELEASE_BUILD
{
std::set<std::string, mntLenCompare>::reverse_iterator i(mountPoints.rbegin());
ASSERT(*i == "/");
}
-#endif // n NDEBUG
+#endif // n BOX_RELEASE_BUILD
#endif // n HAVE_STRUCT_STATFS_F_MNTONNAME || n HAVE_STRUCT_STATVFS_F_MNTONNAME
#endif // HAVE_MOUNTS
// Then... go through each of the entries in the configuration,
// making sure there's a directory created for it.
- for(std::list<std::pair<std::string, Configuration> >::const_iterator i = rLocationsConf.mSubConfigurations.begin();
- i != rLocationsConf.mSubConfigurations.end(); ++i)
- {
- BOX_TRACE("new location: " << i->first);
+ std::vector<std::string> locNames =
+ rLocationsConf.GetSubConfigurationNames();
+
+ for(std::vector<std::string>::iterator
+ pLocName = locNames.begin();
+ pLocName != locNames.end();
+ pLocName++)
+ {
+ const Configuration& rConfig(
+ rLocationsConf.GetSubConfiguration(*pLocName));
+ BOX_TRACE("new location: " << *pLocName);
+
// Create a record for it
std::auto_ptr<Location> apLoc(new Location);
try
{
// Setup names in the location record
- apLoc->mName = i->first;
- apLoc->mPath = i->second.GetKeyValue("Path");
+ apLoc->mName = *pLocName;
+ apLoc->mPath = rConfig.GetKeyValue("Path");
// Read the exclude lists from the Configuration
- apLoc->mpExcludeFiles = BackupClientMakeExcludeList_Files(i->second);
- apLoc->mpExcludeDirs = BackupClientMakeExcludeList_Dirs(i->second);
+ apLoc->mpExcludeFiles = BackupClientMakeExcludeList_Files(rConfig);
+ apLoc->mpExcludeDirs = BackupClientMakeExcludeList_Dirs(rConfig);
// Does this exist on the server?
// Remove from dir object early, so that if we fail
@@ -1814,10 +1590,9 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
if(::statfs(apLoc->mPath.c_str(), &s) != 0)
#endif // HAVE_STRUCT_STATVFS_F_MNTONNAME
{
- BOX_WARNING("Failed to stat location "
+ BOX_LOG_SYS_WARNING("Failed to stat location "
"path '" << apLoc->mPath <<
- "' (" << strerror(errno) <<
- "), skipping location '" <<
+ "', skipping location '" <<
apLoc->mName << "'");
continue;
}
@@ -1929,7 +1704,8 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
// Create and store the directory object for the root of this location
ASSERT(oid != 0);
- BackupClientDirectoryRecord *precord = new BackupClientDirectoryRecord(oid, i->first);
+ BackupClientDirectoryRecord *precord =
+ new BackupClientDirectoryRecord(oid, *pLocName);
apLoc->mpDirectoryRecord.reset(precord);
// Push it back on the vector of locations
@@ -2007,8 +1783,8 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
// --------------------------------------------------------------------------
void BackupDaemon::SetupIDMapsForSync()
{
- // Need to do different things depending on whether it's an in memory implementation,
- // or whether it's all stored on disc.
+ // Need to do different things depending on whether it's an
+ // in memory implementation, or whether it's all stored on disc.
#ifdef BACKIPCLIENTINODETOIDMAP_IN_MEMORY_IMPLEMENTATION
@@ -2016,8 +1792,8 @@ void BackupDaemon::SetupIDMapsForSync()
DeleteIDMapVector(mNewIDMaps);
FillIDMapVector(mNewIDMaps, true /* new maps */);
- // Then make sure that the current maps have objects, even if they are empty
- // (for the very first run)
+ // Then make sure that the current maps have objects,
+ // even if they are empty (for the very first run)
if(mCurrentIDMaps.empty())
{
FillIDMapVector(mCurrentIDMaps, false /* current maps */);
@@ -2191,9 +1967,8 @@ void BackupDaemon::CommitIDMapsAfterSync()
#endif
if(::rename(newmap.c_str(), target.c_str()) != 0)
{
- BOX_ERROR("failed to rename ID map: " << newmap
- << " to " << target << ": "
- << strerror(errno));
+ BOX_LOG_SYS_ERROR("Failed to rename ID map: " <<
+ newmap << " to " << target);
THROW_EXCEPTION(CommonException, OSFileError)
}
}
@@ -2281,20 +2056,14 @@ void BackupDaemon::SetState(int State)
sprintf(newState, "state %d", State);
std::string message = newState;
-#ifdef WIN32
- EnterCriticalSection(&mMessageQueueLock);
- mMessageList.push_back(newState);
- SetEvent(mhMessageToSendEvent);
- LeaveCriticalSection(&mMessageQueueLock);
-#else
message += "\n";
- if(mpCommandSocketInfo == 0)
+ if(!mapCommandSocketInfo.get())
{
return;
}
- if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+ if(mapCommandSocketInfo->mpConnectedSocket.get() == 0)
{
return;
}
@@ -2302,22 +2071,27 @@ void BackupDaemon::SetState(int State)
// Something connected to the command socket, tell it about the new state
try
{
- mpCommandSocketInfo->mpConnectedSocket->Write(message.c_str(),
+ mapCommandSocketInfo->mpConnectedSocket->Write(message.c_str(),
message.length());
}
+ catch(ConnectionException &ce)
+ {
+ BOX_NOTICE("Failed to write state to command socket: " <<
+ ce.what());
+ CloseCommandConnection();
+ }
catch(std::exception &e)
{
- BOX_ERROR("Internal error while writing state "
- "to command socket: " << e.what());
+ BOX_ERROR("Failed to write state to command socket: " <<
+ e.what());
CloseCommandConnection();
}
catch(...)
{
- BOX_ERROR("Internal error while writing state "
- "to command socket: unknown error");
+ BOX_ERROR("Failed to write state to command socket: "
+ "unknown error");
CloseCommandConnection();
}
-#endif
}
@@ -2351,7 +2125,7 @@ void BackupDaemon::TouchFileInWorkingDir(const char *Filename)
// Created: 25/2/04
//
// --------------------------------------------------------------------------
-void BackupDaemon::NotifySysadmin(int Event)
+void BackupDaemon::NotifySysadmin(SysadminNotifier::EventCode Event)
{
static const char *sEventNames[] =
{
@@ -2360,31 +2134,47 @@ void BackupDaemon::NotifySysadmin(int Event)
"backup-error",
"backup-start",
"backup-finish",
+ "backup-ok",
0
};
- BOX_TRACE("sizeof(sEventNames) == " << sizeof(sEventNames));
- BOX_TRACE("sizeof(*sEventNames) == " << sizeof(*sEventNames));
- BOX_TRACE("NotifyEvent__MAX == " << NotifyEvent__MAX);
- ASSERT((sizeof(sEventNames)/sizeof(*sEventNames)) == NotifyEvent__MAX + 1);
+ // BOX_TRACE("sizeof(sEventNames) == " << sizeof(sEventNames));
+ // BOX_TRACE("sizeof(*sEventNames) == " << sizeof(*sEventNames));
+ // BOX_TRACE("NotifyEvent__MAX == " << NotifyEvent__MAX);
+ ASSERT((sizeof(sEventNames)/sizeof(*sEventNames)) == SysadminNotifier::MAX + 1);
- BOX_TRACE("BackupDaemon::NotifySysadmin() called, event = " <<
- sEventNames[Event]);
-
- if(Event < 0 || Event >= NotifyEvent__MAX)
+ if(Event < 0 || Event >= SysadminNotifier::MAX)
{
+ BOX_ERROR("BackupDaemon::NotifySysadmin() called for "
+ "invalid event code " << Event);
THROW_EXCEPTION(BackupStoreException,
BadNotifySysadminEventCode);
}
- // Don't send lots of repeated messages
- if(mNotificationsSent[Event] &&
- Event != NotifyEvent_BackupStart &&
- Event != NotifyEvent_BackupFinish)
+ BOX_TRACE("BackupDaemon::NotifySysadmin() called, event = " <<
+ sEventNames[Event]);
+
+ if(!GetConfiguration().KeyExists("NotifyAlways") ||
+ !GetConfiguration().GetKeyValueBool("NotifyAlways"))
{
- BOX_WARNING("Suppressing duplicate notification about " <<
- sEventNames[Event]);
- return;
+ // Don't send lots of repeated messages
+ // Note: backup-start and backup-finish will always be
+ // logged, because mLastNotifiedEvent is never set to
+ // these values and therefore they are never "duplicates".
+ if(mLastNotifiedEvent == Event)
+ {
+ if(Event == SysadminNotifier::BackupOK)
+ {
+ BOX_INFO("Suppressing duplicate notification "
+ "about " << sEventNames[Event]);
+ }
+ else
+ {
+ BOX_WARNING("Suppressing duplicate notification "
+ "about " << sEventNames[Event]);
+ }
+ return;
+ }
}
// Is there a notification script?
@@ -2392,10 +2182,10 @@ void BackupDaemon::NotifySysadmin(int Event)
if(!conf.KeyExists("NotifyScript"))
{
// Log, and then return
- if(Event != NotifyEvent_BackupStart &&
- Event != NotifyEvent_BackupFinish)
+ if(Event != SysadminNotifier::BackupStart &&
+ Event != SysadminNotifier::BackupFinish)
{
- BOX_ERROR("Not notifying administrator about event "
+ BOX_INFO("Not notifying administrator about event "
<< sEventNames[Event] << " -- set NotifyScript "
"to do this in future");
}
@@ -2407,20 +2197,22 @@ void BackupDaemon::NotifySysadmin(int Event)
sEventNames[Event]);
// Log what we're about to do
- BOX_NOTICE("About to notify administrator about event "
+ BOX_INFO("About to notify administrator about event "
<< sEventNames[Event] << ", running script '"
<< script << "'");
// Then do it
- if(::system(script.c_str()) != 0)
+ int returnCode = ::system(script.c_str());
+ if(returnCode != 0)
{
- BOX_ERROR("Notify script returned an error code. ('"
- << script << "')");
+ BOX_WARNING("Notify script returned error code: " <<
+ returnCode << " ('" << script << "')");
+ }
+ else if(Event != SysadminNotifier::BackupStart &&
+ Event != SysadminNotifier::BackupFinish)
+ {
+ mLastNotifiedEvent = Event;
}
-
- // Flag that this is done so the administrator isn't constantly
- // bombarded with lots of errors
- mNotificationsSent[Event] = true;
}
@@ -2461,13 +2253,13 @@ void BackupDaemon::DeleteUnusedRootDirEntries(BackupClientContext &rContext)
// Entries to delete, and it's the right time to do so...
BOX_NOTICE("Deleting unused locations from store root...");
BackupProtocolClient &connection(rContext.GetConnection());
- for(std::vector<std::pair<int64_t,std::string> >::iterator i(mUnusedRootDirEntries.begin()); i != mUnusedRootDirEntries.end(); ++i)
+ for(std::vector<std::pair<int64_t,std::string> >::iterator
+ i(mUnusedRootDirEntries.begin());
+ i != mUnusedRootDirEntries.end(); ++i)
{
connection.QueryDeleteDirectory(i->first);
-
- // Log this
- BOX_NOTICE("Deleted " << i->second << " (ID " << i->first
- << ") from store root");
+ rContext.GetProgressNotifier().NotifyFileDeleted(
+ i->first, i->second);
}
// Reset state
@@ -2738,9 +2530,12 @@ BackupDaemon::CommandSocketInfo::~CommandSocketInfo()
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time_t theLastSyncTime, box_time_t theNextSyncTime)
-// Purpose: Serializes remote directory and file information into a stream of bytes, using an Archive abstraction.
-//
+// Name: BackupDaemon::SerializeStoreObjectInfo(
+// box_time_t theLastSyncTime,
+// box_time_t theNextSyncTime)
+// Purpose: Serializes remote directory and file information
+// into a stream of bytes, using an Archive
+// abstraction.
// Created: 2005/04/11
//
// --------------------------------------------------------------------------
@@ -2749,7 +2544,8 @@ static const int STOREOBJECTINFO_MAGIC_ID_VALUE = 0x7777525F;
static const std::string STOREOBJECTINFO_MAGIC_ID_STRING = "BBACKUPD-STATE";
static const int STOREOBJECTINFO_VERSION = 2;
-bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time_t theLastSyncTime, box_time_t theNextSyncTime) const
+bool BackupDaemon::SerializeStoreObjectInfo(box_time_t theLastSyncTime,
+ box_time_t theNextSyncTime) const
{
if(!GetConfiguration().KeyExists("StoreObjectInfoFile"))
{
@@ -2778,7 +2574,7 @@ bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
anArchive.Write(STOREOBJECTINFO_MAGIC_ID_STRING);
anArchive.Write(STOREOBJECTINFO_VERSION);
anArchive.Write(GetLoadedConfigModifiedTime());
- anArchive.Write(aClientStoreMarker);
+ anArchive.Write(mClientStoreMarker);
anArchive.Write(theLastSyncTime);
anArchive.Write(theNextSyncTime);
@@ -2830,15 +2626,13 @@ bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
}
catch(std::exception &e)
{
- BOX_ERROR("Internal error writing store object "
- "info file (" << StoreObjectInfoFile << "): "
- << e.what());
+ BOX_ERROR("Failed to write StoreObjectInfoFile: " <<
+ StoreObjectInfoFile << ": " << e.what());
}
catch(...)
{
- BOX_ERROR("Internal error writing store object "
- "info file (" << StoreObjectInfoFile << "): "
- "unknown error");
+ BOX_ERROR("Failed to write StoreObjectInfoFile: " <<
+ StoreObjectInfoFile << ": unknown error");
}
return created;
@@ -2847,13 +2641,17 @@ bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_time_t & theLastSyncTime, box_time_t & theNextSyncTime)
-// Purpose: Deserializes remote directory and file information from a stream of bytes, using an Archive abstraction.
-//
+// Name: BackupDaemon::DeserializeStoreObjectInfo(
+// box_time_t & theLastSyncTime,
+// box_time_t & theNextSyncTime)
+// Purpose: Deserializes remote directory and file information
+// from a stream of bytes, using an Archive
+// abstraction.
// Created: 2005/04/11
//
// --------------------------------------------------------------------------
-bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_time_t & theLastSyncTime, box_time_t & theNextSyncTime)
+bool BackupDaemon::DeserializeStoreObjectInfo(box_time_t & theLastSyncTime,
+ box_time_t & theNextSyncTime)
{
//
//
@@ -2945,7 +2743,7 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
//
// this is it, go at it
//
- anArchive.Read(aClientStoreMarker);
+ anArchive.Read(mClientStoreMarker);
anArchive.Read(theLastSyncTime);
anArchive.Read(theNextSyncTime);
@@ -3023,7 +2821,7 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
DeleteAllLocations();
- aClientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;
+ mClientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;
theLastSyncTime = 0;
theNextSyncTime = 0;
@@ -3057,9 +2855,10 @@ bool BackupDaemon::DeleteStoreObjectInfo() const
// Check to see if the file exists
if(!FileExists(storeObjectInfoFile.c_str()))
{
- // File doesn't exist -- so can't be deleted. But something isn't quite right, so log a message
- BOX_WARNING("Store object info file did not exist when it "
- "was supposed to. (" << storeObjectInfoFile << ")");
+ // File doesn't exist -- so can't be deleted. But something
+ // isn't quite right, so log a message
+ BOX_WARNING("StoreObjectInfoFile did not exist when it "
+ "was supposed to: " << storeObjectInfoFile);
// Return true to stop things going around in a loop
return true;
@@ -3068,8 +2867,8 @@ bool BackupDaemon::DeleteStoreObjectInfo() const
// Actually delete it
if(::unlink(storeObjectInfoFile.c_str()) != 0)
{
- BOX_ERROR("Failed to delete the old store object info file: "
- << storeObjectInfoFile << ": "<< strerror(errno));
+ BOX_LOG_SYS_ERROR("Failed to delete the old "
+ "StoreObjectInfoFile: " << storeObjectInfoFile);
return false;
}
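
The WaitOnCommandSocket(), SetState() and SendSyncStartOrFinish() hunks above amount to a simple line-based text protocol on the command socket: the daemon writes a one-line status summary on connect, periodic "ping" and "state <n>" lines, and "start-sync"/"finish-sync" notifications, and answers each command line it receives with "ok" or "error". Below is a minimal client sketch, assuming a Unix domain socket; the socket path ("/var/run/bbackupd.sock") and the "sync" command are placeholders based on typical bbackupctl usage, not taken from this diff.

// Sketch only: a tiny command-socket client, not bbackupctl itself.
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstring>
#include <cstdio>

int main()
{
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if(fd < 0) { perror("socket"); return 1; }

	struct sockaddr_un addr;
	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	// Placeholder path: in practice this comes from the CommandSocket
	// configuration key.
	strncpy(addr.sun_path, "/var/run/bbackupd.sock", sizeof(addr.sun_path) - 1);

	if(connect(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0)
	{ perror("connect"); return 1; }

	// Send one command line; the daemon replies "ok" or "error",
	// interleaved with "ping" and "state <n>" lines.
	const char cmd[] = "sync\n";
	write(fd, cmd, sizeof(cmd) - 1);

	char buf[256];
	ssize_t n = read(fd, buf, sizeof(buf) - 1);
	if(n > 0) { buf[n] = '\0'; printf("%s", buf); }

	close(fd);
	return 0;
}
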
diff --git a/bin/bbackupd/BackupDaemon.h b/bin/bbackupd/BackupDaemon.h
index 62f9c393..0c864abd 100644
--- a/bin/bbackupd/BackupDaemon.h
+++ b/bin/bbackupd/BackupDaemon.h
@@ -14,16 +14,20 @@
#include <string>
#include <memory>
+#include "BackupClientContext.h"
+#include "BackupClientDirectoryRecord.h"
#include "BoxTime.h"
#include "Daemon.h"
-#include "BackupClientDirectoryRecord.h"
+#include "Logging.h"
#include "Socket.h"
#include "SocketListen.h"
#include "SocketStream.h"
-#include "Logging.h"
+#include "TLSContext.h"
+
#include "autogen_BackupProtocolClient.h"
#ifdef WIN32
+ #include "WinNamedPipeListener.h"
#include "WinNamedPipeStream.h"
#endif
@@ -43,7 +47,8 @@ class Archive;
// Created: 2003/10/08
//
// --------------------------------------------------------------------------
-class BackupDaemon : public Daemon, ProgressNotifier
+class BackupDaemon : public Daemon, ProgressNotifier, LocationResolver,
+RunStatusProvider, SysadminNotifier
{
public:
BackupDaemon();
@@ -52,10 +57,10 @@ public:
private:
// methods below do partial (specialized) serialization of
// client state only
- bool SerializeStoreObjectInfo(int64_t aClientStoreMarker,
- box_time_t theLastSyncTime, box_time_t theNextSyncTime) const;
- bool DeserializeStoreObjectInfo(int64_t & aClientStoreMarker,
- box_time_t & theLastSyncTime, box_time_t & theNextSyncTime);
+ bool SerializeStoreObjectInfo(box_time_t theLastSyncTime,
+ box_time_t theNextSyncTime) const;
+ bool DeserializeStoreObjectInfo(box_time_t & theLastSyncTime,
+ box_time_t & theNextSyncTime);
bool DeleteStoreObjectInfo() const;
BackupDaemon(const BackupDaemon &);
@@ -65,6 +70,14 @@ public:
std::string GetOptionString();
int ProcessOption(signed int option);
int Main(const std::string &rConfigFileName);
+
+ // This shouldn't be here, but apparently gcc on
+ // Windows has no idea about inherited methods...
+ virtual int Main(const char *DefaultConfigFile, int argc,
+ const char *argv[])
+ {
+ return Daemon::Main(DefaultConfigFile, argc, argv);
+ }
#endif
void Run();
@@ -88,21 +101,22 @@ public:
int GetState() {return mState;}
// Allow other classes to call this too
- enum
- {
- NotifyEvent_StoreFull = 0,
- NotifyEvent_ReadError,
- NotifyEvent_BackupError,
- NotifyEvent_BackupStart,
- NotifyEvent_BackupFinish,
- NotifyEvent__MAX
- // When adding notifications, remember to add strings to NotifySysadmin()
- };
- void NotifySysadmin(int Event);
+ void NotifySysadmin(SysadminNotifier::EventCode Event);
private:
void Run2();
+public:
+ void InitCrypto();
+ void RunSyncNowWithExceptionHandling();
+ void RunSyncNow();
+ void OnBackupStart();
+ void OnBackupFinish();
+ // TouchFileInWorkingDir is only here for use by Boxi.
+ // This does NOT constitute an API!
+ void TouchFileInWorkingDir(const char *Filename);
+
+private:
void DeleteAllLocations();
void SetupLocations(BackupClientContext &rClientContext, const Configuration &rLocationsConf);
@@ -126,8 +140,6 @@ private:
void CloseCommandConnection();
void SendSyncStartOrFinish(bool SendStart);
- void TouchFileInWorkingDir(const char *Filename);
-
void DeleteUnusedRootDirEntries(BackupClientContext &rContext);
#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
@@ -137,7 +149,7 @@ private:
int UseScriptToSeeIfSyncAllowed();
-private:
+public:
class Location
{
public:
@@ -157,7 +169,11 @@ private:
ExcludeList *mpExcludeFiles;
ExcludeList *mpExcludeDirs;
};
-
+
+ typedef const std::vector<Location *> Locations;
+ Locations GetLocations() { return mLocations; }
+
+private:
int mState; // what the daemon is currently doing
std::vector<Location *> mLocations;
@@ -179,7 +195,8 @@ private:
CommandSocketInfo &operator=(const CommandSocketInfo &);
public:
#ifdef WIN32
- WinNamedPipeStream mListeningSocket;
+ WinNamedPipeListener<1 /* listen backlog */> mListeningSocket;
+ std::auto_ptr<WinNamedPipeStream> mpConnectedSocket;
#else
SocketListen<SocketStream, 1 /* listen backlog */> mListeningSocket;
std::auto_ptr<SocketStream> mpConnectedSocket;
@@ -188,23 +205,51 @@ private:
};
// Using a socket?
- CommandSocketInfo *mpCommandSocketInfo;
+ std::auto_ptr<CommandSocketInfo> mapCommandSocketInfo;
// Stop notifications being repeated.
- bool mNotificationsSent[NotifyEvent__MAX];
+ SysadminNotifier::EventCode mLastNotifiedEvent;
// Unused entries in the root directory wait a while before being deleted
box_time_t mDeleteUnusedRootDirEntriesAfter; // time to delete them
std::vector<std::pair<int64_t,std::string> > mUnusedRootDirEntries;
+ int64_t mClientStoreMarker;
+ bool mStorageLimitExceeded;
+ bool mReadErrorsOnFilesystemObjects;
+ box_time_t mLastSyncTime, mNextSyncTime;
+ box_time_t mCurrentSyncStartTime, mUpdateStoreInterval;
+ TLSContext mTlsContext;
+ bool mDeleteStoreObjectInfoFile;
+ bool mDoSyncForcedByPreviousSyncError;
+
public:
bool StopRun() { return this->Daemon::StopRun(); }
+ bool StorageLimitExceeded() { return mStorageLimitExceeded; }
private:
bool mLogAllFileAccess;
+public:
+ ProgressNotifier* GetProgressNotifier() { return mpProgressNotifier; }
+ LocationResolver* GetLocationResolver() { return mpLocationResolver; }
+ RunStatusProvider* GetRunStatusProvider() { return mpRunStatusProvider; }
+ SysadminNotifier* GetSysadminNotifier() { return mpSysadminNotifier; }
+ void SetProgressNotifier (ProgressNotifier* p) { mpProgressNotifier = p; }
+ void SetLocationResolver (LocationResolver* p) { mpLocationResolver = p; }
+ void SetRunStatusProvider(RunStatusProvider* p) { mpRunStatusProvider = p; }
+ void SetSysadminNotifier (SysadminNotifier* p) { mpSysadminNotifier = p; }
+
+private:
+ ProgressNotifier* mpProgressNotifier;
+ LocationResolver* mpLocationResolver;
+ RunStatusProvider* mpRunStatusProvider;
+ SysadminNotifier* mpSysadminNotifier;
+
/* ProgressNotifier implementation */
public:
+ virtual void NotifyIDMapsSetup(BackupClientContext& rContext) { }
+
virtual void NotifyScanDirectory(
const BackupClientDirectoryRecord* pDirRecord,
const std::string& rLocalPath)
@@ -387,7 +432,7 @@ public:
{
if (mLogAllFileAccess)
{
- BOX_INFO("Uploading complete file: " << rLocalPath);
+ BOX_NOTICE("Uploading complete file: " << rLocalPath);
}
}
virtual void NotifyFileUploadingPatch(
@@ -396,7 +441,7 @@ public:
{
if (mLogAllFileAccess)
{
- BOX_INFO("Uploading patch to file: " << rLocalPath);
+ BOX_NOTICE("Uploading patch to file: " << rLocalPath);
}
}
virtual void NotifyFileUploaded(
@@ -406,7 +451,7 @@ public:
{
if (mLogAllFileAccess)
{
- BOX_INFO("Uploaded file: " << rLocalPath);
+ BOX_NOTICE("Uploaded file: " << rLocalPath);
}
}
virtual void NotifyFileSynchronised(
@@ -419,18 +464,51 @@ public:
BOX_INFO("Synchronised file: " << rLocalPath);
}
}
+ virtual void NotifyDirectoryDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_NOTICE("Deleted directory: " << rRemotePath <<
+ " (ID " << BOX_FORMAT_OBJECTID(ObjectID) <<
+ ")");
+ }
+ }
+ virtual void NotifyFileDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_NOTICE("Deleted file: " << rRemotePath <<
+ " (ID " << BOX_FORMAT_OBJECTID(ObjectID) <<
+ ")");
+ }
+ }
+ virtual void NotifyReadProgress(int64_t readSize, int64_t offset,
+ int64_t length, box_time_t elapsed, box_time_t finish)
+ {
+ BOX_TRACE("Read " << readSize << " bytes at " << offset <<
+ ", " << (length - offset) << " remain, eta " <<
+ BoxTimeToSeconds(finish - elapsed) << "s");
+ }
+ virtual void NotifyReadProgress(int64_t readSize, int64_t offset,
+ int64_t length)
+ {
+ BOX_TRACE("Read " << readSize << " bytes at " << offset <<
+ ", " << (length - offset) << " remain");
+ }
+ virtual void NotifyReadProgress(int64_t readSize, int64_t offset)
+ {
+ BOX_TRACE("Read " << readSize << " bytes at " << offset <<
+ ", unknown bytes remaining");
+ }
#ifdef WIN32
- public:
- void RunHelperThread(void);
-
private:
- bool mDoSyncFlagOut, mSyncIsForcedOut;
bool mInstallService, mRemoveService, mRunAsService;
std::string mServiceName;
- HANDLE mhMessageToSendEvent, mhCommandReceivedEvent;
- CRITICAL_SECTION mMessageQueueLock;
- std::vector<std::string> mMessageList;
#endif
};
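
The new Set/Get methods for ProgressNotifier, LocationResolver, RunStatusProvider and SysadminNotifier let an embedding application (the header mentions Boxi) supply its own callbacks in place of the daemon's defaults. A minimal sketch follows, assuming the daemon routes events through the injected pointer (the setters are shown above; their use is not part of this diff). LoggingNotifier is an invented name.

// Sketch only: a custom SysadminNotifier injected by an embedding application.
#include "BackupDaemon.h"
#include "BackupDaemonInterface.h"
#include "Logging.h"

class LoggingNotifier : public SysadminNotifier
{
public:
	virtual void NotifySysadmin(SysadminNotifier::EventCode Event)
	{
		// Replace the mail-out script with a simple log line.
		BOX_WARNING("backup event code " << Event);
	}
};

// Inside the embedding application, before the daemon starts running:
//	BackupDaemon daemon;
//	LoggingNotifier notifier;
//	daemon.SetSysadminNotifier(&notifier);
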
diff --git a/bin/bbackupd/BackupDaemonInterface.h b/bin/bbackupd/BackupDaemonInterface.h
new file mode 100644
index 00000000..5bbdd427
--- /dev/null
+++ b/bin/bbackupd/BackupDaemonInterface.h
@@ -0,0 +1,164 @@
+// --------------------------------------------------------------------------
+//
+// File
+// Name: BackupDaemonInterface.h
+// Purpose: Interfaces for managing a BackupDaemon
+// Created: 2008/12/30
+//
+// --------------------------------------------------------------------------
+
+#ifndef BACKUPDAEMONINTERFACE__H
+#define BACKUPDAEMONINTERFACE__H
+
+#include <string>
+// #include <map>
+
+// #include "BackupClientFileAttributes.h"
+// #include "BackupStoreDirectory.h"
+#include "BoxTime.h"
+// #include "MD5Digest.h"
+// #include "ReadLoggingStream.h"
+// #include "RunStatusProvider.h"
+
+class Archive;
+class BackupClientContext;
+class BackupDaemon;
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: SysadminNotifier
+// Purpose: Provides a NotifySysadmin() method to send mail to the sysadmin
+// Created: 2005/11/15
+//
+// --------------------------------------------------------------------------
+class SysadminNotifier
+{
+ public:
+ virtual ~SysadminNotifier() { }
+
+ typedef enum
+ {
+ StoreFull = 0,
+ ReadError,
+ BackupError,
+ BackupStart,
+ BackupFinish,
+ BackupOK,
+ MAX
+ // When adding notifications, remember to add
+ // strings to NotifySysadmin()
+ }
+ EventCode;
+
+ virtual void NotifySysadmin(EventCode Event) = 0;
+};
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: ProgressNotifier
+// Purpose: Provides methods for the backup library to inform the user
+// interface about its progress with the backup
+// Created: 2005/11/20
+//
+// --------------------------------------------------------------------------
+
+class BackupClientContext;
+class BackupClientDirectoryRecord;
+
+class ProgressNotifier
+{
+ public:
+ virtual ~ProgressNotifier() { }
+ virtual void NotifyIDMapsSetup(BackupClientContext& rContext) = 0;
+ virtual void NotifyScanDirectory(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyDirStatFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyFileStatFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyDirListFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyMountPointSkipped(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileExcluded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyDirExcluded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyUnsupportedFileType(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileReadFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyFileModifiedInFuture(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileSkippedServerFull(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileUploadException(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const BoxException& rException) = 0;
+ virtual void NotifyFileUploadServerError(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int type, int subtype) = 0;
+ virtual void NotifyFileUploading(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileUploadingPatch(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileUploaded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int64_t FileSize) = 0;
+ virtual void NotifyFileSynchronised(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int64_t FileSize) = 0;
+ virtual void NotifyDirectoryDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath) = 0;
+ virtual void NotifyFileDeleted(
+ int64_t ObjectID,
+ const std::string& rRemotePath) = 0;
+ virtual void NotifyReadProgress(int64_t readSize, int64_t offset,
+ int64_t length, box_time_t elapsed, box_time_t finish) = 0;
+ virtual void NotifyReadProgress(int64_t readSize, int64_t offset,
+ int64_t length) = 0;
+ virtual void NotifyReadProgress(int64_t readSize, int64_t offset) = 0;
+};
+
+// --------------------------------------------------------------------------
+//
+// Class
+// Name: LocationResolver
+// Purpose: Interface for classes that can resolve locations to paths,
+// like BackupDaemon
+// Created: 2003/10/08
+//
+// --------------------------------------------------------------------------
+class LocationResolver
+{
+public:
+ virtual ~LocationResolver() { }
+ virtual bool FindLocationPathName(const std::string &rLocationName,
+ std::string &rPathOut) const = 0;
+};
+
+#endif // BACKUPDAEMONINTERFACE__H
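
LocationResolver is the narrowest of the new interfaces: a single FindLocationPathName() lookup from a location name to a local path. BackupDaemon provides the production implementation; the map-backed sketch below (FixedLocationResolver is an invented name) shows the minimum a test or embedding application would need to supply, assuming only the interface declared above.

// Sketch only: a trivial LocationResolver backed by a std::map.
#include <map>
#include <string>
#include "BackupDaemonInterface.h"

class FixedLocationResolver : public LocationResolver
{
public:
	void Add(const std::string& rName, const std::string& rPath)
	{
		mPaths[rName] = rPath;
	}
	virtual bool FindLocationPathName(const std::string &rLocationName,
		std::string &rPathOut) const
	{
		std::map<std::string, std::string>::const_iterator
			i(mPaths.find(rLocationName));
		if(i == mPaths.end()) return false;
		rPathOut = i->second;
		return true;
	}
private:
	std::map<std::string, std::string> mPaths;
};
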
diff --git a/bin/bbackupd/Win32ServiceFunctions.cpp b/bin/bbackupd/Win32ServiceFunctions.cpp
index a7bf6bd9..2df914a7 100644
--- a/bin/bbackupd/Win32ServiceFunctions.cpp
+++ b/bin/bbackupd/Win32ServiceFunctions.cpp
@@ -203,12 +203,12 @@ int InstallService(const char* pConfigFileName, const std::string& rServiceName)
{
if (pConfigFileName != NULL)
{
- struct stat st;
+ EMU_STRUCT_STAT st;
if (emu_stat(pConfigFileName, &st) != 0)
{
- BOX_ERROR("Failed to open configuration file '" <<
- pConfigFileName << "': " << strerror(errno));
+ BOX_LOG_SYS_ERROR("Failed to open configuration file "
+ "'" << pConfigFileName << "'");
return 1;
}
@@ -221,7 +221,7 @@ int InstallService(const char* pConfigFileName, const std::string& rServiceName)
}
}
- SC_HANDLE scm = OpenSCManager(0,0,SC_MANAGER_CREATE_SERVICE);
+ SC_HANDLE scm = OpenSCManager(0, 0, SC_MANAGER_CREATE_SERVICE);
if (!scm)
{
diff --git a/bin/bbackupd/bbackupd-config.in b/bin/bbackupd/bbackupd-config.in
index 16ddb75c..925dcc3e 100755
--- a/bin/bbackupd/bbackupd-config.in
+++ b/bin/bbackupd/bbackupd-config.in
@@ -26,7 +26,7 @@ Parameters:
explicitly, using bbackupctl sync
account-num (hexdecimal) and server-hostname
are supplied by the server administrator
- working-dir is usually @localstatedir_expanded@
+ working-dir is usually @localstatedir_expanded@/bbackupd
backup directories is list of directories to back up
__E
@@ -227,7 +227,7 @@ SUBJECT="BACKUP PROBLEM on host $hostname"
SENDTO="$current_username"
if [ "\$1" = "" ]; then
- echo "Usage: $0 <store-full|read-error|backup-error|backup-start|backup-finish>" >&2
+ echo "Usage: \$0 <store-full|read-error|backup-error|backup-start|backup-finish>" >&2
exit 2
elif [ "\$1" = store-full ]; then
$sendmail \$SENDTO <<EOM
@@ -577,7 +577,7 @@ What you need to do now...
more files will be backed up. You want to know about this.
6) Start the backup daemon with the command
- @bindir_expanded@/bbackupd$daemon_args
+ @sbindir_expanded@/bbackupd$daemon_args
in /etc/rc.local, or your local equivalent.
Note that bbackupd must run as root.
__E
diff --git a/bin/bbackupd/bbackupd.cpp b/bin/bbackupd/bbackupd.cpp
index a0f275b3..d334a2df 100644
--- a/bin/bbackupd/bbackupd.cpp
+++ b/bin/bbackupd/bbackupd.cpp
@@ -29,7 +29,7 @@ int main(int argc, const char *argv[])
MAINHELPER_START
- Logging::SetProgramName("Box Backup (bbackupd)");
+ Logging::SetProgramName("bbackupd");
Logging::ToConsole(true);
Logging::ToSyslog (true);
diff --git a/bin/bbackupd/win32/NotifySysAdmin.vbs b/bin/bbackupd/win32/NotifySysAdmin.vbs
index 49082887..712d92da 100644
--- a/bin/bbackupd/win32/NotifySysAdmin.vbs
+++ b/bin/bbackupd/win32/NotifySysAdmin.vbs
@@ -10,44 +10,62 @@ Dim smtpserver
Set WshNet = CreateObject("WScript.Network")
hostname = WshNet.ComputerName
-account = "0a1"
+account = "0x1"
from = "boxbackup@" & hostname
sendto = "admin@example.com"
-subjtmpl = "BACKUP PROBLEM on host " & hostname
smtpserver = "smtp.example.com"
+subjtmpl = "BACKUP PROBLEM on host " & hostname
Set args = WScript.Arguments
If args(0) = "store-full" Then
subject = subjtmpl & " (store full)"
- body = "The store account for "&hostname&" is full." & vbCrLf & vbCrLf & _
- "=============================" & vbCrLf & _
- "FILES ARE NOT BEING BACKED UP" & vbCrLf & _
- "=============================" & vbCrLf & vbCrLf & _
- "Please adjust the limits on account "&account&" on server "&hostname&"." _
- & vbCrLf
+ body = "The store account for "&hostname&" is full." & vbCrLf & _
+ vbCrLf & _
+ "=============================" & vbCrLf & _
+ "FILES ARE NOT BEING BACKED UP" & vbCrLf & _
+ "=============================" & vbCrLf & _
+ vbCrLf & _
+ "Please adjust the limits on account "&account&" on server "&hostname&"." _
+ & vbCrLf
SendMail from,sendto,subject,body
ElseIf args(0) = "read-error" Then
subject = subjtmpl & " (read errors)"
- body = "Errors occured reading some files or directories for backup on "&hostname&"." _
- & vbCrLf & vbCrLf & _
- "===================================" & vbCrLf & _
- "THESE FILES ARE NOT BEING BACKED UP" & vbCrLf & _
- "===================================" & vbCrLf & vbCrLf & _
- "Check the logs on "&hostname&" for the files and directories which caused" & _
- "these errors, and take appropraite action." & vbCrLf & vbCrLf & _
- "Other files are being backed up." & vbCrLf
+ body = "Errors occurred reading some files or directories " & _
+ "for backup on " & hostname & "." & vbCrLf & _
+ vbCrLf & _
+ "===================================" & vbCrLf & _
+ "THESE FILES ARE NOT BEING BACKED UP" & vbCrLf & _
+ "===================================" & vbCrLf & vbCrLf & _
+ "Check the logs on "&hostname&" for the files and " & _
+ "directories which caused" & vbCrLf & _
+ "these errors, and take appropriate action." & vbCrLf & _
+ vbCrLf & _
+ "Other files are being backed up." & vbCrLf
+ SendMail from,sendto,subject,body
+ElseIf args(0) = "backup-error" Then
+	subject = subjtmpl & " (backup error)"
+ body = "An error occurred during the backup on "&hostname&"." _
+ & vbCrLf & vbCrLf & _
+ "==========================" & vbCrLf & _
+ "FILES MAY NOT BE BACKED UP" & vbCrLf & _
+ "==========================" & vbCrLf & _
+ vbCrLf & _
+ "Check the logs on "&hostname&" for more " & _
+ "information about the error, " & vbCrLf & _
+ "and take appropriate action." & vbCrLf
SendMail from,sendto,subject,body
-ElseIf args(0) = "backup-start" Or args(0) = "backup-finish" Then
+ElseIf args(0) = "backup-start" Or args(0) = "backup-finish" _
+ Or args(0) = "backup-ok" Then
' do nothing for these messages by default
Else
subject = subjtmpl & " (unknown)"
body = "The backup daemon on "&hostname&" reported an unknown error." _
- & vbCrLf & vbCrLf & _
- "==========================" & vbCrLf & _
- "FILES MAY NOT BE BACKED UP" & vbCrLf & _
- "==========================" & vbCrLf & vbCrLf & _
- "Please check the logs on "&hostname&"." & vbCrLf
+ & vbCrLf & vbCrLf & _
+ "==========================" & vbCrLf & _
+ "FILES MAY NOT BE BACKED UP" & vbCrLf & _
+ "==========================" & vbCrLf & vbCrLf & _
+ "Please check the logs on "&hostname&"." & vbCrLf
SendMail from,sendto,subject,body
End If
diff --git a/bin/bbackupd/win32/bbackupd.conf b/bin/bbackupd/win32/bbackupd.conf
index 6c987f7d..b0793b29 100644
--- a/bin/bbackupd/win32/bbackupd.conf
+++ b/bin/bbackupd/win32/bbackupd.conf
@@ -173,7 +173,7 @@ Server
# If a directive ends in Regex, then it is a regular expression rather than a
# explicit full pathname. See:
#
-# http://bbdev.fluffy.co.uk/trac/wiki/Win32Regex
+# http://www.boxbackup.org/trac/wiki/Win32Regex
#
# for more information about regular expressions on Windows.
#