author     Reinhard Tartler <siretart@tauware.de>  2008-01-19 15:08:54 +0100
committer  Reinhard Tartler <siretart@tauware.de>  2008-01-19 15:08:54 +0100
commit     2733267954e91e394fbb512ea3abb4c497c0752f (patch)
tree       d6cdebd8776bceba06a2fb5e4ed06a4744bc1b57 /bin/bbackupd
parent     1d56581c644c53f1b9a182c6574bc2fc5243d4d1 (diff)
import version 0.11rc1
This commit was made by 'bzr import', using the upstream tarball of Version 0.11rc1 (md5sum: 75608d8bb72dff9a556850ccd0ae8cb9).
Diffstat (limited to 'bin/bbackupd')
-rw-r--r--   bin/bbackupd/BackupClientContext.cpp          |  210
-rw-r--r--   bin/bbackupd/BackupClientContext.h            |   40
-rw-r--r--   bin/bbackupd/BackupClientDeleteList.cpp       |    2
-rw-r--r--   bin/bbackupd/BackupClientDeleteList.h         |    2
-rw-r--r--   bin/bbackupd/BackupClientDirectoryRecord.cpp  |  383
-rw-r--r--   bin/bbackupd/BackupClientDirectoryRecord.h    |   91
-rw-r--r--   bin/bbackupd/BackupClientInodeToIDMap.cpp     |    2
-rw-r--r--   bin/bbackupd/BackupClientInodeToIDMap.h       |    2
-rw-r--r--   bin/bbackupd/BackupDaemon.cpp                 | 1421
-rw-r--r--   bin/bbackupd/BackupDaemon.h                   |  268
-rw-r--r--   bin/bbackupd/ClientException.txt              |   11
-rw-r--r--   bin/bbackupd/Makefile.extra                   |    7
-rw-r--r--   bin/bbackupd/Win32BackupService.cpp           |   39
-rw-r--r--   bin/bbackupd/Win32BackupService.h             |    4
-rw-r--r--   bin/bbackupd/Win32ServiceFunctions.cpp        |  241
-rw-r--r--   bin/bbackupd/Win32ServiceFunctions.h          |    8
-rwxr-xr-x   bin/bbackupd/bbackupd-config                  |  140
-rwxr-xr-x   bin/bbackupd/bbackupd-config.in               |  599
-rw-r--r--   bin/bbackupd/bbackupd.cpp                     |   78
-rw-r--r--   bin/bbackupd/win32/NotifySysAdmin.vbs         |   95
-rw-r--r--   bin/bbackupd/win32/ReadMe.txt                 |   24
-rw-r--r--   bin/bbackupd/win32/bbackupd.conf              |  175
22 files changed, 2963 insertions, 879 deletions
diff --git a/bin/bbackupd/BackupClientContext.cpp b/bin/bbackupd/BackupClientContext.cpp
index 2445b077..ee8ffdd7 100644
--- a/bin/bbackupd/BackupClientContext.cpp
+++ b/bin/bbackupd/BackupClientContext.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -47,12 +47,10 @@
#include "Box.h"
-#ifdef HAVE_SYSLOG_H
- #include <syslog.h>
-#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
+
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
@@ -67,19 +65,28 @@
#include "BackupDaemon.h"
#include "autogen_BackupProtocolClient.h"
#include "BackupStoreFile.h"
+#include "Logging.h"
#include "MemLeakFindOn.h"
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupClientContext::BackupClientContext(BackupDaemon &, TLSContext &, const std::string &, int32_t, bool)
+// Name: BackupClientContext::BackupClientContext(BackupDaemon &, TLSContext &, const std::string &, int32_t, bool, bool, std::string)
// Purpose: Constructor
// Created: 2003/10/08
//
// --------------------------------------------------------------------------
-BackupClientContext::BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLSContext, const std::string &rHostname,
- int32_t AccountNumber, bool ExtendedLogging)
+BackupClientContext::BackupClientContext
+(
+ BackupDaemon &rDaemon,
+ TLSContext &rTLSContext,
+ const std::string &rHostname,
+ int32_t AccountNumber,
+ bool ExtendedLogging,
+ bool ExtendedLogToFile,
+ std::string ExtendedLogFile
+)
: mrDaemon(rDaemon),
mrTLSContext(rTLSContext),
mHostname(rHostname),
@@ -87,6 +94,9 @@ BackupClientContext::BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLS
mpSocket(0),
mpConnection(0),
mExtendedLogging(ExtendedLogging),
+ mExtendedLogToFile(ExtendedLogToFile),
+ mExtendedLogFile(ExtendedLogFile),
+ mpExtendedLogFileHandle(NULL),
mClientStoreMarker(ClientStoreMarker_NotKnown),
mpDeleteList(0),
mpCurrentIDMap(0),
@@ -94,8 +104,8 @@ BackupClientContext::BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLS
mStorageLimitExceeded(false),
mpExcludeFiles(0),
mpExcludeDirs(0),
- mbIsManaged(false),
- mTimeMgmtEpoch(0)
+ mKeepAliveTimer(0),
+ mbIsManaged(false)
{
}
@@ -153,7 +163,8 @@ BackupProtocolClient &BackupClientContext::GetConnection()
}
// Log intention
- ::syslog(LOG_INFO, "Opening connection to server %s...", mHostname.c_str());
+ BOX_INFO("Opening connection to server '" <<
+ mHostname << "'...");
// Connect!
mpSocket->Open(mrTLSContext, Socket::TypeINET, mHostname.c_str(), BOX_PORT_BBSTORED);
@@ -164,6 +175,24 @@ BackupProtocolClient &BackupClientContext::GetConnection()
// Set logging option
mpConnection->SetLogToSysLog(mExtendedLogging);
+ if (mExtendedLogToFile)
+ {
+ ASSERT(mpExtendedLogFileHandle == NULL);
+
+ mpExtendedLogFileHandle = fopen(
+ mExtendedLogFile.c_str(), "a+");
+
+ if (!mpExtendedLogFileHandle)
+ {
+ BOX_ERROR("Failed to open extended log "
+ "file: " << strerror(errno));
+ }
+ else
+ {
+ mpConnection->SetLogToFile(mpExtendedLogFileHandle);
+ }
+ }
+
// Handshake
mpConnection->Handshake();
@@ -202,19 +231,16 @@ BackupProtocolClient &BackupClientContext::GetConnection()
}
// Log success
- ::syslog(LOG_INFO, "Connection made, login successful");
+ BOX_INFO("Connection made, login successful");
// Check to see if there is any space available on the server
- int64_t softLimit = loginConf->GetBlocksSoftLimit();
- int64_t hardLimit = loginConf->GetBlocksHardLimit();
- // Threshold for uploading new stuff
- int64_t stopUploadThreshold = softLimit + ((hardLimit - softLimit) / 3);
- if(loginConf->GetBlocksUsed() > stopUploadThreshold)
+ if(loginConf->GetBlocksUsed() >= loginConf->GetBlocksHardLimit())
{
// no -- flag so only things like deletions happen
mStorageLimitExceeded = true;
// Log
- ::syslog(LOG_INFO, "Exceeded storage limits on server -- not uploading changes to files");
+ BOX_WARNING("Exceeded storage hard-limit on server, "
+ "not uploading changes to files");
}
}
catch(...)
@@ -294,6 +320,12 @@ void BackupClientContext::CloseAnyOpenConnection()
delete mpDeleteList;
mpDeleteList = 0;
}
+
+ if (mpExtendedLogFileHandle != NULL)
+ {
+ fclose(mpExtendedLogFileHandle);
+ mpExtendedLogFileHandle = NULL;
+ }
}
@@ -341,8 +373,8 @@ BackupClientDeleteList &BackupClientContext::GetDeleteList()
// --------------------------------------------------------------------------
//
// Function
-// Name:
-// Purpose:
+// Name: BackupClientContext::PerformDeletions()
+// Purpose: Perform any pending file deletions.
// Created: 10/11/03
//
// --------------------------------------------------------------------------
@@ -499,35 +531,18 @@ bool BackupClientContext::FindFilename(int64_t ObjectID, int64_t ContainingDirec
return true;
}
-
-// maximum time to spend diffing
-static int sMaximumDiffTime = 600;
-// maximum time of SSL inactivity (keep-alive interval)
-static int sKeepAliveTime = 0;
-
void BackupClientContext::SetMaximumDiffingTime(int iSeconds)
{
- sMaximumDiffTime = iSeconds < 0 ? 0 : iSeconds;
- TRACE1("Set maximum diffing time to %d seconds\n", sMaximumDiffTime);
+ mMaximumDiffingTime = iSeconds < 0 ? 0 : iSeconds;
+ BOX_TRACE("Set maximum diffing time to " << mMaximumDiffingTime <<
+ " seconds");
}
void BackupClientContext::SetKeepAliveTime(int iSeconds)
{
- sKeepAliveTime = iSeconds < 0 ? 0 : iSeconds;
- TRACE1("Set keep-alive time to %d seconds\n", sKeepAliveTime);
-}
-
-// --------------------------------------------------------------------------
-//
-// Function
-// Name: static TimerSigHandler(int)
-// Purpose: Signal handler
-// Created: 19/3/04
-//
-// --------------------------------------------------------------------------
-static void TimerSigHandler(int iUnused)
-{
- BackupStoreFile::DiffTimerExpired();
+ mKeepAliveTime = iSeconds < 0 ? 0 : iSeconds;
+ BOX_TRACE("Set keep-alive time to " << mKeepAliveTime << " seconds");
+ mKeepAliveTimer = Timer(mKeepAliveTime);
}
// --------------------------------------------------------------------------
@@ -540,59 +555,8 @@ static void TimerSigHandler(int iUnused)
// --------------------------------------------------------------------------
void BackupClientContext::ManageDiffProcess()
{
- if (mbIsManaged || !mpConnection)
- return;
-
- ASSERT(mTimeMgmtEpoch == 0);
-
-#ifdef PLATFORM_CYGWIN
- ::signal(SIGALRM, TimerSigHandler);
-#elif defined WIN32
- // no support for SIGVTALRM
- SetTimerHandler(TimerSigHandler);
-#else
- ::signal(SIGVTALRM, TimerSigHandler);
-#endif // PLATFORM_CYGWIN
-
- struct itimerval timeout;
- memset(&timeout, 0, sizeof(timeout));
-
- //
- //
- //
- if (sMaximumDiffTime <= 0 && sKeepAliveTime <= 0)
- {
- TRACE0("Diff control not requested - letting things run wild\n");
- return;
- }
- else if (sMaximumDiffTime > 0 && sKeepAliveTime > 0)
- {
- timeout.it_value.tv_sec = sKeepAliveTime < sMaximumDiffTime ? sKeepAliveTime : sMaximumDiffTime;
- timeout.it_interval.tv_sec = sKeepAliveTime < sMaximumDiffTime ? sKeepAliveTime : sMaximumDiffTime;
- }
- else
- {
- timeout.it_value.tv_sec = sKeepAliveTime > 0 ? sKeepAliveTime : sMaximumDiffTime;
- timeout.it_interval.tv_sec = sKeepAliveTime > 0 ? sKeepAliveTime : sMaximumDiffTime;
- }
-
- // avoid race
- mTimeMgmtEpoch = time(NULL);
-
-#ifdef PLATFORM_CYGWIN
- if(::setitimer(ITIMER_REAL, &timeout, NULL) != 0)
-#else
- if(::setitimer(ITIMER_VIRTUAL, &timeout, NULL) != 0)
-#endif // PLATFORM_CYGWIN
- {
- mTimeMgmtEpoch = 0;
-
- TRACE0("WARNING: couldn't set file diff control timeout\n");
- THROW_EXCEPTION(BackupStoreException, Internal)
- }
-
+ ASSERT(!mbIsManaged);
mbIsManaged = true;
- TRACE0("Initiated timer for file diff control\n");
}
// --------------------------------------------------------------------------
@@ -605,33 +569,16 @@ void BackupClientContext::ManageDiffProcess()
// --------------------------------------------------------------------------
void BackupClientContext::UnManageDiffProcess()
{
- if (!mbIsManaged /* don't test for active connection, just do it */)
- return;
-
- struct itimerval timeout;
- memset(&timeout, 0, sizeof(timeout));
-
-#ifdef PLATFORM_CYGWIN
- if(::setitimer(ITIMER_REAL, &timeout, NULL) != 0)
-#else
- if(::setitimer(ITIMER_VIRTUAL, &timeout, NULL) != 0)
-#endif // PLATFORM_CYGWIN
- {
- TRACE0("WARNING: couldn't clear file diff control timeout\n");
- THROW_EXCEPTION(BackupStoreException, Internal)
- }
-
+ // ASSERT(mbIsManaged);
mbIsManaged = false;
- mTimeMgmtEpoch = 0;
-
- TRACE0("Suspended timer for file diff control\n");
}
// --------------------------------------------------------------------------
//
// Function
// Name: BackupClientContext::DoKeepAlive()
-// Purpose: Does something inconsequential over the SSL link to keep it up
+// Purpose: Check whether it's time to send a KeepAlive
+// message over the SSL link, and if so, send it.
// Created: 04/19/2005
//
// --------------------------------------------------------------------------
@@ -639,33 +586,26 @@ void BackupClientContext::DoKeepAlive()
{
if (!mpConnection)
{
- ::syslog(LOG_ERR, "DoKeepAlive() called with no connection!");
+ return;
+ }
+
+ if (mKeepAliveTime == 0)
+ {
return;
}
+ if (!mKeepAliveTimer.HasExpired())
+ {
+ return;
+ }
+
+ BOX_TRACE("KeepAliveTime reached, sending keep-alive message");
mpConnection->QueryGetIsAlive();
-}
-
-// --------------------------------------------------------------------------
-//
-// Function
-// Name: BackupClientContext::GetTimeMgmtEpoch()
-// Purpose: Returns the unix time when the diff was started, or zero
-// if the diff process is unmanaged.
-// Created: 04/19/2005
-//
-// --------------------------------------------------------------------------
-time_t BackupClientContext::GetTimeMgmtEpoch()
-{
- return mTimeMgmtEpoch;
+
+ mKeepAliveTimer = Timer(mKeepAliveTime);
}
int BackupClientContext::GetMaximumDiffingTime()
{
- return sMaximumDiffTime;
-}
-
-int BackupClientContext::GetKeepaliveTime()
-{
- return sKeepAliveTime;
+ return mMaximumDiffingTime;
}
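
The hunks above drop the setitimer()/signal-handler based diff timeout in favour of a Timer object that DoKeepAlive() polls on every call, re-arming the timer after each keep-alive is sent. A minimal stand-alone sketch of that polling pattern, using std::chrono instead of Box Backup's own Timer class (the names below are hypothetical, for illustration only):

#include <chrono>

// Hypothetical stand-in for the Timer/DoKeepAlive pattern above: poll on
// each call, report "due" only when the interval has elapsed, then re-arm.
// A zero interval disables keep-alives entirely.
class KeepAlivePoller
{
public:
	explicit KeepAlivePoller(int IntervalSeconds)
	: mInterval(IntervalSeconds),
	  mNextDue(std::chrono::steady_clock::now() + mInterval)
	{ }

	bool KeepAliveDue()
	{
		if(mInterval.count() == 0)
		{
			return false;	// keep-alive disabled
		}
		if(std::chrono::steady_clock::now() < mNextDue)
		{
			return false;	// not yet time
		}
		mNextDue = std::chrono::steady_clock::now() + mInterval;
		return true;	// caller sends its keep-alive message now
	}

private:
	std::chrono::seconds mInterval;
	std::chrono::steady_clock::time_point mNextDue;
};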
diff --git a/bin/bbackupd/BackupClientContext.h b/bin/bbackupd/BackupClientContext.h
index d41dc78a..7d42a93e 100644
--- a/bin/bbackupd/BackupClientContext.h
+++ b/bin/bbackupd/BackupClientContext.h
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -52,6 +52,7 @@
#include "BackupClientDeleteList.h"
#include "BackupStoreFile.h"
#include "ExcludeList.h"
+#include "Timer.h"
class TLSContext;
class BackupProtocolClient;
@@ -73,8 +74,16 @@ class BackupStoreFilenameClear;
class BackupClientContext : public DiffTimer
{
public:
- BackupClientContext(BackupDaemon &rDaemon, TLSContext &rTLSContext, const std::string &rHostname,
- int32_t AccountNumber, bool ExtendedLogging);
+ BackupClientContext
+ (
+ BackupDaemon &rDaemon,
+ TLSContext &rTLSContext,
+ const std::string &rHostname,
+ int32_t AccountNumber,
+ bool ExtendedLogging,
+ bool ExtendedLogToFile,
+ std::string ExtendedLogFile
+ );
virtual ~BackupClientContext();
private:
BackupClientContext(const BackupClientContext &);
@@ -181,7 +190,7 @@ public:
// Created: 04/19/2005
//
// --------------------------------------------------------------------------
- static void SetMaximumDiffingTime(int iSeconds);
+ void SetMaximumDiffingTime(int iSeconds);
// --------------------------------------------------------------------------
//
@@ -191,7 +200,7 @@ public:
// Created: 04/19/2005
//
// --------------------------------------------------------------------------
- static void SetKeepAliveTime(int iSeconds);
+ void SetKeepAliveTime(int iSeconds);
// --------------------------------------------------------------------------
//
@@ -213,19 +222,18 @@ public:
// --------------------------------------------------------------------------
void UnManageDiffProcess();
- // --------------------------------------------------------------------------
+ // -------------------------------------------------------------------
//
// Function
// Name: BackupClientContext::DoKeepAlive()
- // Purpose: Does something inconsequential over the SSL link to
- // keep it up, implements DiffTimer interface
+ // Purpose: Check whether it's time to send a KeepAlive
+ // message over the SSL link, and if so, send it.
// Created: 04/19/2005
//
- // --------------------------------------------------------------------------
+ // -------------------------------------------------------------------
virtual void DoKeepAlive();
- virtual time_t GetTimeMgmtEpoch();
virtual int GetMaximumDiffingTime();
- virtual int GetKeepaliveTime();
+ virtual bool IsManaged() { return mbIsManaged; }
private:
BackupDaemon &mrDaemon;
@@ -235,6 +243,9 @@ private:
SocketStreamTLS *mpSocket;
BackupProtocolClient *mpConnection;
bool mExtendedLogging;
+ bool mExtendedLogToFile;
+ std::string mExtendedLogFile;
+ FILE* mpExtendedLogFileHandle;
int64_t mClientStoreMarker;
BackupClientDeleteList *mpDeleteList;
const BackupClientInodeToIDMap *mpCurrentIDMap;
@@ -242,11 +253,10 @@ private:
bool mStorageLimitExceeded;
ExcludeList *mpExcludeFiles;
ExcludeList *mpExcludeDirs;
-
+ Timer mKeepAliveTimer;
bool mbIsManaged;
- // unix time when diff was started
- time_t mTimeMgmtEpoch;
+ int mKeepAliveTime;
+ int mMaximumDiffingTime;
};
-
#endif // BACKUPCLIENTCONTEXT__H
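
The ExtendedLogToFile/ExtendedLogFile members declared above back the new behaviour in GetConnection(): the log file is opened in append mode when the connection is made and closed again in CloseAnyOpenConnection(). A self-contained sketch of just that open step, with a hypothetical helper name:

#include <cstdio>
#include <cstring>
#include <cerrno>
#include <string>

// Hypothetical helper mirroring the fopen()/strerror() handling above.
// Returns NULL on failure after reporting the reason; the caller would pass
// a non-NULL handle to SetLogToFile() and fclose() it when disconnecting.
FILE* OpenExtendedLog(const std::string& rLogFile)
{
	FILE* pHandle = ::fopen(rLogFile.c_str(), "a+");
	if(pHandle == NULL)
	{
		std::fprintf(stderr, "Failed to open extended log file: %s\n",
			std::strerror(errno));
	}
	return pHandle;
}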
diff --git a/bin/bbackupd/BackupClientDeleteList.cpp b/bin/bbackupd/BackupClientDeleteList.cpp
index 30f8ab47..2e154b50 100644
--- a/bin/bbackupd/BackupClientDeleteList.cpp
+++ b/bin/bbackupd/BackupClientDeleteList.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
diff --git a/bin/bbackupd/BackupClientDeleteList.h b/bin/bbackupd/BackupClientDeleteList.h
index 5a6fc212..71a668a5 100644
--- a/bin/bbackupd/BackupClientDeleteList.h
+++ b/bin/bbackupd/BackupClientDeleteList.h
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.cpp b/bin/bbackupd/BackupClientDirectoryRecord.cpp
index cccf2f9b..0d3300cb 100644
--- a/bin/bbackupd/BackupClientDirectoryRecord.cpp
+++ b/bin/bbackupd/BackupClientDirectoryRecord.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -67,6 +67,9 @@
#include "BackupDaemon.h"
#include "BackupStoreException.h"
#include "Archive.h"
+#include "PathUtils.h"
+#include "Logging.h"
+#include "ReadLoggingStream.h"
#include "MemLeakFindOn.h"
@@ -175,8 +178,8 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
{
// The directory has probably been deleted, so just ignore this error.
// In a future scan, this deletion will be noticed, deleted from server, and this object deleted.
- TRACE1("Stat failed for '%s' (directory)\n",
- rLocalPath.c_str());
+ rParams.GetProgressNotifier().NotifyDirStatFailed(
+ this, rLocalPath, strerror(errno));
return;
}
// Store inode number in map so directories are tracked in case they're renamed
@@ -204,15 +207,48 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
std::vector<std::string> dirs;
std::vector<std::string> files;
bool downloadDirectoryRecordBecauseOfFutureFiles = false;
+
+ struct stat dir_st;
+ if(::lstat(rLocalPath.c_str(), &dir_st) != 0)
+ {
+ // Report the error (logs and
+ // eventual email to administrator)
+ rParams.GetProgressNotifier().NotifyFileStatFailed(this,
+ rLocalPath, strerror(errno));
+
+ // FIXME move to NotifyFileStatFailed()
+ SetErrorWhenReadingFilesystemObject(rParams,
+ rLocalPath.c_str());
+
+ // This shouldn't happen, so we'd better not continue
+ THROW_EXCEPTION(CommonException, OSFileError)
+ }
+
// BLOCK
{
// read the contents...
DIR *dirHandle = 0;
try
{
+ rParams.GetProgressNotifier().NotifyScanDirectory(
+ this, rLocalPath);
+
dirHandle = ::opendir(rLocalPath.c_str());
if(dirHandle == 0)
{
+ // Report the error (logs and
+ // eventual email to administrator)
+ if (errno == EACCES)
+ {
+ rParams.GetProgressNotifier().NotifyDirListFailed(
+ this, rLocalPath, "Access denied");
+ }
+ else
+ {
+ rParams.GetProgressNotifier().NotifyDirListFailed(this,
+ rLocalPath, strerror(errno));
+ }
+
// Report the error (logs and eventual email to administrator)
SetErrorWhenReadingFilesystemObject(rParams, rLocalPath.c_str());
// Ignore this directory for now.
@@ -234,6 +270,8 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
std::string filename;
while((en = ::readdir(dirHandle)) != 0)
{
+ rParams.mrContext.DoKeepAlive();
+
// Don't need to use LinuxWorkaround_FinishDirentStruct(en, rLocalPath.c_str());
// on Linux, as a stat is performed to get all this info
@@ -245,13 +283,28 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
}
// Stat file to get info
- filename = rLocalPath + DIRECTORY_SEPARATOR +
- en->d_name;
+ filename = MakeFullPath(rLocalPath, en->d_name);
+ #ifdef WIN32
+ // Don't stat the file just yet, to ensure
+ // that users can exclude unreadable files
+ // to suppress warnings that they are
+ // not accessible.
+ //
+ // Our emulated readdir() abuses en->d_type,
+ // which would normally contain DT_REG,
+ // DT_DIR, etc, but we only use it here and
+ // prefer S_IFREG, S_IFDIR...
+ int type = en->d_type;
+ #else
if(::lstat(filename.c_str(), &st) != 0)
{
// Report the error (logs and
// eventual email to administrator)
+ rParams.GetProgressNotifier().NotifyFileStatFailed(this,
+ filename, strerror(errno));
+
+ // FIXME move to NotifyFileStatFailed()
SetErrorWhenReadingFilesystemObject(
rParams, filename.c_str());
@@ -259,7 +312,17 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
continue;
}
+ if(st.st_dev != dir_st.st_dev)
+ {
+ rParams.GetProgressNotifier()
+ .NotifyMountPointSkipped(this,
+ filename);
+ continue;
+ }
+
int type = st.st_mode & S_IFMT;
+ #endif
+
if(type == S_IFREG || type == S_IFLNK)
{
// File or symbolic link
@@ -267,6 +330,11 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Exclude it?
if(rParams.mrContext.ExcludeFile(filename))
{
+ rParams.GetProgressNotifier()
+ .NotifyFileExcluded(
+ this,
+ filename);
+
// Next item!
continue;
}
@@ -281,6 +349,11 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Exclude it?
if(rParams.mrContext.ExcludeDir(filename))
{
+ rParams.GetProgressNotifier()
+ .NotifyDirExcluded(
+ this,
+ filename);
+
// Next item!
continue;
}
@@ -290,11 +363,56 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
}
else
{
+ if(rParams.mrContext.ExcludeFile(filename))
+ {
+ rParams.GetProgressNotifier()
+ .NotifyFileExcluded(
+ this,
+ filename);
+ }
+ else
+ {
+ rParams.GetProgressNotifier()
+ .NotifyUnsupportedFileType(
+ this, filename);
+ SetErrorWhenReadingFilesystemObject(
+ rParams, filename.c_str());
+ }
+
continue;
}
// Here if the object is something to back up (file, symlink or dir, not excluded)
// So make the information for adding to the checksum
+
+ #ifdef WIN32
+ // We didn't stat the file before,
+ // but now we need the information.
+ if(::lstat(filename.c_str(), &st) != 0)
+ {
+ rParams.GetProgressNotifier()
+ .NotifyFileStatFailed(this,
+ filename,
+ strerror(errno));
+
+ // Report the error (logs and
+ // eventual email to administrator)
+ SetErrorWhenReadingFilesystemObject(
+ rParams, filename.c_str());
+
+ // Ignore this entry for now.
+ continue;
+ }
+
+ if(st.st_dev != dir_st.st_dev)
+ {
+ rParams.GetProgressNotifier()
+ .NotifyMountPointSkipped(this,
+ filename);
+ continue;
+ }
+ #endif
+
checksum_info.mModificationTime = FileModificationTime(st);
checksum_info.mAttributeModificationTime = FileAttrModificationTime(st);
checksum_info.mSize = st.st_size;
@@ -310,8 +428,8 @@ void BackupClientDirectoryRecord::SyncDirectory(BackupClientDirectoryRecord::Syn
// Log that this has happened
if(!rParams.mHaveLoggedWarningAboutFutureFileTimes)
{
- ::syslog(LOG_ERR, "Some files have modification times excessively in the future. Check clock syncronisation.\n");
- ::syslog(LOG_ERR, "Example file (only one shown) : %s\n", filename.c_str());
+ rParams.GetProgressNotifier().NotifyFileModifiedInFuture(
+ this, filename);
rParams.mHaveLoggedWarningAboutFutureFileTimes = true;
}
}
@@ -549,8 +667,11 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
for(std::vector<std::string>::const_iterator f = rFiles.begin();
f != rFiles.end(); ++f)
{
+ // Send keep-alive message if needed
+ rParams.mrContext.DoKeepAlive();
+
// Filename of this file
- std::string filename(rLocalPath + DIRECTORY_SEPARATOR + *f);
+ std::string filename(MakeFullPath(rLocalPath, *f));
// Get relevant info about file
box_time_t modTime = 0;
@@ -564,7 +685,16 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
struct stat st;
if(::lstat(filename.c_str(), &st) != 0)
{
- THROW_EXCEPTION(CommonException, OSFileError)
+ rParams.GetProgressNotifier().NotifyFileStatFailed(this,
+ filename, strerror(errno));
+
+ // Report the error (logs and
+ // eventual email to administrator)
+ SetErrorWhenReadingFilesystemObject(rParams,
+ filename.c_str());
+
+ // Ignore this entry for now.
+ continue;
}
// Extract required data
@@ -683,34 +813,94 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// Need to update?
//
// Condition for upload:
- // modifiction time within sync period
+ // modification time within sync period
// if it's been seen before but not uploaded, is the time from this first sight longer than the MaxUploadWait
// and if we know about it from a directory listing, that it hasn't got the same upload time as on the store
- if(
- (
- // Check the file modified within the acceptable time period we're checking
- // If the file isn't on the server, the acceptable time starts at zero.
- // Check pDirOnStore and en, because if we didn't download a directory listing,
- // pDirOnStore will be zero, but we know it's on the server.
- ( ((pDirOnStore != 0 && en == 0) || (modTime >= rParams.mSyncPeriodStart)) && modTime < rParams.mSyncPeriodEnd)
-
- // However, just in case things are continually modified, we check the first seen time.
- // The two compares of syncPeriodEnd and pendingFirstSeenTime are because the values are unsigned.
- || (pendingFirstSeenTime != 0 &&
- (rParams.mSyncPeriodEnd > pendingFirstSeenTime)
- && ((rParams.mSyncPeriodEnd - pendingFirstSeenTime) > rParams.mMaxUploadWait))
-
- // Then make sure that if files are added with a time less than the sync period start
- // (which can easily happen on file server), it gets uploaded. The directory contents checksum
- // will pick up the fact it has been added, so the store listing will be available when this happens.
- || ((modTime <= rParams.mSyncPeriodStart) && (en != 0) && (en->GetModificationTime() != modTime))
-
- // And just to catch really badly off clocks in the future for file server clients,
- // just upload the file if it's madly in the future.
- || (modTime > rParams.mUploadAfterThisTimeInTheFuture)
- )
- // But even then, only upload it if the mod time locally is different to that on the server.
- && (en == 0 || en->GetModificationTime() != modTime))
+
+ bool doUpload = false;
+
+ // Only upload a file if the mod time locally is
+ // different to that on the server.
+
+ if (en == 0 || en->GetModificationTime() != modTime)
+ {
+ // Check the file modified within the acceptable time period we're checking
+ // If the file isn't on the server, the acceptable time starts at zero.
+ // Check pDirOnStore and en, because if we didn't download a directory listing,
+ // pDirOnStore will be zero, but we know it's on the server.
+ if (modTime < rParams.mSyncPeriodEnd)
+ {
+ if (pDirOnStore != 0 && en == 0)
+ {
+ doUpload = true;
+ BOX_TRACE(filename << ": will upload "
+ "(not on server)");
+ }
+ else if (modTime >= rParams.mSyncPeriodStart)
+ {
+ doUpload = true;
+ BOX_TRACE(filename << ": will upload "
+ "(modified since last sync)");
+ }
+ }
+
+ // However, just in case things are continually
+ // modified, we check the first seen time.
+ // The two compares of syncPeriodEnd and
+ // pendingFirstSeenTime are because the values
+ // are unsigned.
+
+ if (!doUpload &&
+ pendingFirstSeenTime != 0 &&
+ rParams.mSyncPeriodEnd > pendingFirstSeenTime &&
+ (rParams.mSyncPeriodEnd - pendingFirstSeenTime)
+ > rParams.mMaxUploadWait)
+ {
+ doUpload = true;
+ BOX_TRACE(filename << ": will upload "
+ "(continually modified)");
+ }
+
+ // Then make sure that if files are added with a
+ // time less than the sync period start
+ // (which can easily happen on file server), it
+ // gets uploaded. The directory contents checksum
+ // will pick up the fact it has been added, so the
+ // store listing will be available when this happens.
+
+ if (!doUpload &&
+ modTime <= rParams.mSyncPeriodStart &&
+ en != 0 &&
+ en->GetModificationTime() != modTime)
+ {
+ doUpload = true;
+ BOX_TRACE(filename << ": will upload "
+ "(mod time changed)");
+ }
+
+ // And just to catch really badly off clocks in
+ // the future for file server clients,
+ // just upload the file if it's madly in the future.
+
+ if (!doUpload && modTime >
+ rParams.mUploadAfterThisTimeInTheFuture)
+ {
+ doUpload = true;
+ BOX_TRACE(filename << ": will upload "
+ "(mod time in the future)");
+ }
+ }
+
+ if (!doUpload)
+ {
+ BOX_TRACE(filename << ": will not upload "
+ "(no reason to upload, mod time is "
+ << modTime << " versus sync period "
+ << rParams.mSyncPeriodStart << " to "
+ << rParams.mSyncPeriodEnd << ")");
+ }
+
+ if (doUpload)
{
// Make sure we're connected -- must connect here so we know whether
// the storage limit has been exceeded, and hence whether or not
@@ -735,6 +925,9 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
{
// Connection errors should just be passed on to the main handler, retries
// would probably just cause more problems.
+ rParams.GetProgressNotifier()
+ .NotifyFileUploadException(
+ this, filename, e);
throw;
}
catch(BoxException &e)
@@ -743,8 +936,9 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
allUpdatedSuccessfully = false;
// Log it.
SetErrorWhenReadingFilesystemObject(rParams, filename.c_str());
- // Log error.
- ::syslog(LOG_ERR, "Error code when uploading was (%d/%d), %s", e.GetType(), e.GetSubType(), e.what());
+ rParams.GetProgressNotifier()
+ .NotifyFileUploadException(
+ this, filename, e);
}
// Update structures if the file was uploaded successfully.
@@ -757,6 +951,11 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
}
}
+ else
+ {
+ rParams.GetProgressNotifier().NotifyFileSkippedServerFull(this,
+ filename);
+ }
}
else if(en != 0 && en->GetAttributesHash() != attributesHash)
{
@@ -838,6 +1037,9 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
}
}
}
+
+ rParams.GetProgressNotifier().NotifyFileSynchronised(this,
+ filename, fileSize);
}
// Erase contents of files to save space when recursing
@@ -855,8 +1057,11 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
for(std::vector<std::string>::const_iterator d = rDirs.begin();
d != rDirs.end(); ++d)
{
+ // Send keep-alive message if needed
+ rParams.mrContext.DoKeepAlive();
+
// Get the local filename
- std::string dirname(rLocalPath + DIRECTORY_SEPARATOR + *d);
+ std::string dirname(MakeFullPath(rLocalPath, *d));
// See if it's in the listing (if we have one)
BackupStoreFilenameClear storeFilename(*d);
@@ -893,11 +1098,15 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// In the list, just use this pointer
psubDirRecord = e->second;
}
- else if(!rParams.mrContext.StorageLimitExceeded()) // know we've got a connection if we get this far, as dir will have been modified.
+ else
{
- // Note: only think about adding directory records if there's space left on the server.
- // If there isn't, this step will be repeated when there is some available.
-
+ // Note: if we have exceeded our storage limit, then
+ // we should not upload any more data, nor create any
+ // DirectoryRecord representing data that would have
+ // been uploaded. This step will be repeated when
+ // there is some space available.
+ bool doCreateDirectoryRecord = true;
+
// Need to create the record. But do we need to create the directory on the server?
int64_t subDirObjectID = 0;
if(en != 0)
@@ -905,6 +1114,12 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
// No. Exists on the server, and we know about it from the listing.
subDirObjectID = en->GetObjectID();
}
+ else if(rParams.mrContext.StorageLimitExceeded())
+ // know we've got a connection if we get this far,
+ // as dir will have been modified.
+ {
+ doCreateDirectoryRecord = false;
+ }
else
{
// Yes, creation required!
@@ -987,20 +1202,23 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
haveJustCreatedDirOnServer = true;
}
}
-
- // New an object for this
- psubDirRecord = new BackupClientDirectoryRecord(subDirObjectID, *d);
-
- // Store in list
- try
- {
- mSubDirectories[*d] = psubDirRecord;
- }
- catch(...)
- {
- delete psubDirRecord;
- psubDirRecord = 0;
- throw;
+
+ if (doCreateDirectoryRecord)
+ {
+ // New an object for this
+ psubDirRecord = new BackupClientDirectoryRecord(subDirObjectID, *d);
+
+ // Store in list
+ try
+ {
+ mSubDirectories[*d] = psubDirRecord;
+ }
+ catch(...)
+ {
+ delete psubDirRecord;
+ psubDirRecord = 0;
+ throw;
+ }
}
}
@@ -1060,7 +1278,13 @@ bool BackupClientDirectoryRecord::UpdateItems(BackupClientDirectoryRecord::SyncP
BackupClientDirectoryRecord *rec = e->second;
mSubDirectories.erase(e);
delete rec;
- TRACE2("Deleted directory record for %s/%s\n", rLocalPath.c_str(), dirname.GetClearFilename().c_str());
+
+ std::string name = MakeFullPath(
+ rLocalPath,
+ dirname.GetClearFilename());
+
+ TRACE1("Deleted directory record for "
+ "%s\n", name.c_str());
}
}
}
@@ -1132,7 +1356,11 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
if(diffFromID != 0)
{
- // Found an old version -- get the index
+ // Found an old version
+ rParams.GetProgressNotifier().NotifyFileUploadingPatch(this,
+ rFilename);
+
+ // Get the index
std::auto_ptr<IOStream> blockIndexStream(connection.ReceiveStream());
//
@@ -1168,9 +1396,13 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
if(doNormalUpload)
{
// below threshold or nothing to diff from, so upload whole
+ rParams.GetProgressNotifier().NotifyFileUploading(this,
+ rFilename);
// Prepare to upload, getting a stream which will encode the file as we go along
- std::auto_ptr<IOStream> upload(BackupStoreFile::EncodeFile(rFilename.c_str(), mObjectID, rStoreFilename));
+ std::auto_ptr<IOStream> upload(
+ BackupStoreFile::EncodeFile(rFilename.c_str(),
+ mObjectID, rStoreFilename));
// Send to store
std::auto_ptr<BackupProtocolClientSuccess> stored(
@@ -1190,14 +1422,20 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
if(e.GetType() == ConnectionException::ExceptionType && e.GetSubType() == ConnectionException::Protocol_UnexpectedReply)
{
- // Check and see what error the protocol has -- as it might be an error...
+ // Check and see what error the protocol has,
+ // this is more useful to users than the exception.
int type, subtype;
- if(connection.GetLastError(type, subtype)
- && type == BackupProtocolClientError::ErrorType
- && subtype == BackupProtocolClientError::Err_StorageLimitExceeded)
+ if(connection.GetLastError(type, subtype))
{
- // The hard limit was exceeded on the server, notify!
- rParams.mrDaemon.NotifySysadmin(BackupDaemon::NotifyEvent_StoreFull);
+ if(type == BackupProtocolClientError::ErrorType
+ && subtype == BackupProtocolClientError::Err_StorageLimitExceeded)
+ {
+ // The hard limit was exceeded on the server, notify!
+ rParams.mrDaemon.NotifySysadmin(BackupDaemon::NotifyEvent_StoreFull);
+ }
+ rParams.GetProgressNotifier()
+ .NotifyFileUploadServerError(
+ this, rFilename, type, subtype);
}
}
@@ -1205,6 +1443,8 @@ int64_t BackupClientDirectoryRecord::UploadFile(BackupClientDirectoryRecord::Syn
throw;
}
+ rParams.GetProgressNotifier().NotifyFileUploaded(this, rFilename, FileSize);
+
// Return the new object ID of this file
return objID;
}
@@ -1224,8 +1464,11 @@ void BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(BackupClie
// Zero hash, so it gets synced properly next time round.
::memset(mStateChecksum, 0, sizeof(mStateChecksum));
- // Log the error
- ::syslog(LOG_ERR, "Backup object failed, error when reading %s", Filename);
+ // Log the error - already done by caller
+ /*
+ rParams.GetProgressNotifier().NotifyFileReadFailed(this,
+ Filename, strerror(errno));
+ */
// Mark that an error occured in the parameters object
rParams.mReadErrorsOnFilesystemObjects = true;
@@ -1241,8 +1484,10 @@ void BackupClientDirectoryRecord::SetErrorWhenReadingFilesystemObject(BackupClie
// Created: 8/3/04
//
// --------------------------------------------------------------------------
-BackupClientDirectoryRecord::SyncParams::SyncParams(BackupDaemon &rDaemon, BackupClientContext &rContext)
- : mSyncPeriodStart(0),
+BackupClientDirectoryRecord::SyncParams::SyncParams(BackupDaemon &rDaemon,
+ ProgressNotifier &rProgressNotifier, BackupClientContext &rContext)
+ : mrProgressNotifier(rProgressNotifier),
+ mSyncPeriodStart(0),
mSyncPeriodEnd(0),
mMaxUploadWait(0),
mMaxFileTimeInFuture(99999999999999999LL),
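
The long hunk above unrolls the original compound upload condition into a doUpload flag with a BOX_TRACE message for each reason. A condensed, stand-alone restatement of that decision, with hypothetical parameter names in place of the SyncParams fields and the store directory entry:

#include <cstdint>

// Hypothetical condensation of the doUpload logic above. 'OnServer' means a
// store listing entry exists (en != 0); 'HaveStoreListing' means the store
// directory listing was downloaded (pDirOnStore != 0). Times are unsigned,
// box_time_t-style values.
bool ShouldUpload(bool OnServer, bool HaveStoreListing,
	uint64_t ModTime, uint64_t StoreModTime,
	uint64_t SyncPeriodStart, uint64_t SyncPeriodEnd,
	uint64_t PendingFirstSeenTime, uint64_t MaxUploadWait,
	uint64_t UploadAfterThisTimeInTheFuture)
{
	if(OnServer && StoreModTime == ModTime)
	{
		return false;	// unchanged on the server, nothing to do
	}
	if(ModTime < SyncPeriodEnd &&
		((HaveStoreListing && !OnServer) || ModTime >= SyncPeriodStart))
	{
		return true;	// not on server, or modified since the last sync
	}
	if(PendingFirstSeenTime != 0 && SyncPeriodEnd > PendingFirstSeenTime &&
		SyncPeriodEnd - PendingFirstSeenTime > MaxUploadWait)
	{
		return true;	// continually modified, waited long enough
	}
	if(ModTime <= SyncPeriodStart && OnServer && StoreModTime != ModTime)
	{
		return true;	// mod time changed although it predates the sync period
	}
	return ModTime > UploadAfterThisTimeInTheFuture;	// clock badly in the future
}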
diff --git a/bin/bbackupd/BackupClientDirectoryRecord.h b/bin/bbackupd/BackupClientDirectoryRecord.h
index 2e192a96..3a33ed95 100644
--- a/bin/bbackupd/BackupClientDirectoryRecord.h
+++ b/bin/bbackupd/BackupClientDirectoryRecord.h
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -63,6 +63,82 @@ class BackupDaemon;
// --------------------------------------------------------------------------
//
// Class
+// Name: ProgressNotifier
+// Purpose: Provides methods for the backup library to inform the user
+// interface about its progress with the backup
+// Created: 2005/11/20
+//
+// --------------------------------------------------------------------------
+class BackupClientDirectoryRecord;
+
+class ProgressNotifier
+{
+ public:
+ virtual ~ProgressNotifier() { }
+ virtual void NotifyScanDirectory(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyDirStatFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyFileStatFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyDirListFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyMountPointSkipped(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileExcluded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyDirExcluded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyUnsupportedFileType(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileReadFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg) = 0;
+ virtual void NotifyFileModifiedInFuture(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileSkippedServerFull(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileUploadException(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const BoxException& rException) = 0;
+ virtual void NotifyFileUploadServerError(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int type, int subtype) = 0;
+ virtual void NotifyFileUploading(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileUploadingPatch(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath) = 0;
+ virtual void NotifyFileUploaded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int64_t FileSize) = 0;
+ virtual void NotifyFileSynchronised(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int64_t FileSize) = 0;
+};
+
+// --------------------------------------------------------------------------
+//
+// Class
// Name: BackupClientDirectoryRecord
// Purpose: Implementation of record about directory for backup client
// Created: 2003/10/08
@@ -97,14 +173,18 @@ public:
class SyncParams
{
public:
- SyncParams(BackupDaemon &rDaemon, BackupClientContext &rContext);
+ SyncParams(
+ BackupDaemon &rDaemon,
+ ProgressNotifier &rProgressNotifier,
+ BackupClientContext &rContext);
~SyncParams();
private:
// No copying
SyncParams(const SyncParams&);
SyncParams &operator=(const SyncParams&);
+ ProgressNotifier &mrProgressNotifier;
+
public:
-
// Data members are public, as accessors are not justified here
box_time_t mSyncPeriodStart;
box_time_t mSyncPeriodEnd;
@@ -119,6 +199,11 @@ public:
// Member variables modified by syncing process
box_time_t mUploadAfterThisTimeInTheFuture;
bool mHaveLoggedWarningAboutFutureFileTimes;
+
+ ProgressNotifier& GetProgressNotifier() const
+ {
+ return mrProgressNotifier;
+ }
};
void SyncDirectory(SyncParams &rParams, int64_t ContainingDirectoryID, const std::string &rLocalPath,
diff --git a/bin/bbackupd/BackupClientInodeToIDMap.cpp b/bin/bbackupd/BackupClientInodeToIDMap.cpp
index 5ffe98f9..eb2842b1 100644
--- a/bin/bbackupd/BackupClientInodeToIDMap.cpp
+++ b/bin/bbackupd/BackupClientInodeToIDMap.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
diff --git a/bin/bbackupd/BackupClientInodeToIDMap.h b/bin/bbackupd/BackupClientInodeToIDMap.h
index fdf77cba..806bf964 100644
--- a/bin/bbackupd/BackupClientInodeToIDMap.h
+++ b/bin/bbackupd/BackupClientInodeToIDMap.h
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
diff --git a/bin/bbackupd/BackupDaemon.cpp b/bin/bbackupd/BackupDaemon.cpp
index 5db7cb8b..24fa0a24 100644
--- a/bin/bbackupd/BackupDaemon.cpp
+++ b/bin/bbackupd/BackupDaemon.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -56,9 +56,6 @@
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif
-#ifdef HAVE_SYSLOG_H
- #include <syslog.h>
-#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
@@ -79,6 +76,8 @@
#include <process.h>
#endif
+#include <iostream>
+
#include "Configuration.h"
#include "IOStream.h"
#include "MemBlockStream.h"
@@ -97,6 +96,7 @@
#include "BackupStoreFilenameClear.h"
#include "BackupClientInodeToIDMap.h"
#include "autogen_BackupProtocolClient.h"
+#include "autogen_ConversionException.h"
#include "BackupClientCryptoKeys.h"
#include "BannerText.h"
#include "BackupStoreFile.h"
@@ -112,6 +112,16 @@
#include "IOStreamGetLine.h"
#include "Conversion.h"
#include "Archive.h"
+#include "Timer.h"
+#include "Logging.h"
+#include "autogen_ClientException.h"
+
+#ifdef WIN32
+ #include "Win32ServiceFunctions.h"
+ #include "Win32BackupService.h"
+
+ extern Win32BackupService* gpDaemonService;
+#endif
#include "MemLeakFindOn.h"
@@ -151,30 +161,51 @@ unsigned int WINAPI HelperThread(LPVOID lpParam)
BackupDaemon::BackupDaemon()
: mState(BackupDaemon::State_Initialising),
mpCommandSocketInfo(0),
- mDeleteUnusedRootDirEntriesAfter(0)
+ mDeleteUnusedRootDirEntriesAfter(0),
+ mLogAllFileAccess(false)
+ #ifdef WIN32
+ , mInstallService(false),
+ mRemoveService(false),
+ mRunAsService(false),
+ mServiceName("bbackupd")
+ #endif
{
// Only ever one instance of a daemon
SSLLib::Initialise();
- // Initialise notifcation sent status
- for(int l = 0; l <= NotifyEvent__MAX; ++l)
+ // Initialise notification sent status
+ for(int l = 0; l < NotifyEvent__MAX; ++l)
{
mNotificationsSent[l] = false;
}
-#ifdef WIN32
- // Create a thread to handle the named pipe
- HANDLE hThread;
- unsigned int dwThreadId;
-
- hThread = (HANDLE) _beginthreadex(
- NULL, // default security attributes
- 0, // use default stack size
- HelperThread, // thread function
- this, // argument to thread function
- 0, // use default creation flags
- &dwThreadId); // returns the thread identifier
-#endif
+ #ifdef WIN32
+ // Create the event object to signal from main thread to
+ // worker when new messages are queued to be sent to the
+ // command socket.
+
+ mhMessageToSendEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+ if(mhMessageToSendEvent == INVALID_HANDLE_VALUE)
+ {
+ BOX_ERROR("Failed to create event object: error " <<
+ GetLastError());
+ exit(1);
+ }
+
+ // Create the event object to signal from worker to main thread
+ // when a command has been received on the command socket.
+
+ mhCommandReceivedEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+ if(mhCommandReceivedEvent == INVALID_HANDLE_VALUE)
+ {
+ BOX_ERROR("Failed to create event object: error " <<
+ GetLastError());
+ exit(1);
+ }
+
+ // Create the critical section to protect the message queue
+ InitializeCriticalSection(&mMessageQueueLock);
+ #endif
}
// --------------------------------------------------------------------------
@@ -219,13 +250,21 @@ const char *BackupDaemon::DaemonName() const
// Created: 1/1/04
//
// --------------------------------------------------------------------------
-const char *BackupDaemon::DaemonBanner() const
+std::string BackupDaemon::DaemonBanner() const
{
-#ifndef NDEBUG
- // Don't display banner in debug builds
- return 0;
-#else
return BANNER_TEXT("Backup Client");
+}
+
+void BackupDaemon::Usage()
+{
+ this->Daemon::Usage();
+
+#ifdef WIN32
+ std::cout <<
+ " -s Run as a Windows Service, for internal use only\n"
+ " -i Install Windows Service (you may want to specify a config file)\n"
+ " -r Remove Windows Service\n"
+ " -S <name> Service name for -i and -r options\n";
#endif
}
@@ -260,7 +299,7 @@ void BackupDaemon::SetupInInitialProcess()
// Print a warning on this platform if the CommandSocket is used.
if(GetConfiguration().KeyExists("CommandSocket"))
{
- printf(
+ BOX_WARNING(
"==============================================================================\n"
"SECURITY WARNING: This platform cannot check the credentials of connections to\n"
"the command socket. This is a potential DoS security problem.\n"
@@ -298,26 +337,133 @@ void BackupDaemon::DeleteAllLocations()
}
#ifdef WIN32
+std::string BackupDaemon::GetOptionString()
+{
+ std::string oldOpts = this->Daemon::GetOptionString();
+ ASSERT(oldOpts.find("s") == std::string::npos);
+ ASSERT(oldOpts.find("S") == std::string::npos);
+ ASSERT(oldOpts.find("i") == std::string::npos);
+ ASSERT(oldOpts.find("r") == std::string::npos);
+ return oldOpts + "sS:ir";
+}
+
+int BackupDaemon::ProcessOption(signed int option)
+{
+ switch(option)
+ {
+ case 's':
+ {
+ mRunAsService = true;
+ return 0;
+ }
+
+ case 'S':
+ {
+ mServiceName = optarg;
+ return 0;
+ }
+
+ case 'i':
+ {
+ mInstallService = true;
+ return 0;
+ }
+
+ case 'r':
+ {
+ mRemoveService = true;
+ return 0;
+ }
+
+ default:
+ {
+ return this->Daemon::ProcessOption(option);
+ }
+ }
+}
+
+int BackupDaemon::Main(const std::string &rConfigFileName)
+{
+ if (mInstallService)
+ {
+ return InstallService(rConfigFileName.c_str(), mServiceName);
+ }
+
+ if (mRemoveService)
+ {
+ return RemoveService(mServiceName);
+ }
+
+ Logging::SetProgramName("Box Backup (" + mServiceName + ")");
+
+ int returnCode;
+
+ if (mRunAsService)
+ {
+ // We will be called reentrantly by the Service Control
+ // Manager, and we had better not call OurService again!
+ mRunAsService = false;
+
+ BOX_INFO("Box Backup service starting");
+ returnCode = OurService(rConfigFileName.c_str());
+ BOX_INFO("Box Backup service shut down");
+ }
+ else
+ {
+ returnCode = this->Daemon::Main(rConfigFileName);
+ }
+
+ return returnCode;
+}
+
void BackupDaemon::RunHelperThread(void)
{
+ const Configuration &conf(GetConfiguration());
mpCommandSocketInfo = new CommandSocketInfo;
- this->mReceivedCommandConn = false;
+ WinNamedPipeStream& rSocket(mpCommandSocketInfo->mListeningSocket);
- // loop until the parent process exits
- while (TRUE)
+ // loop until the parent process exits, or we decide
+ // to kill the thread ourselves
+ while (!IsTerminateWanted())
{
try
{
- mpCommandSocketInfo->mListeningSocket.Accept(
- BOX_NAMED_PIPE_NAME);
+ std::string socket = conf.GetKeyValue("CommandSocket");
+ rSocket.Accept(socket);
+ }
+ catch (BoxException &e)
+ {
+ BOX_ERROR("Failed to open command socket: " <<
+ e.what());
+ SetTerminateWanted();
+ break; // this is fatal to listening thread
+ }
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Failed to open command socket: " <<
+ e.what());
+ SetTerminateWanted();
+ break; // this is fatal to listening thread
+ }
+ catch(...)
+ {
+ BOX_ERROR("Failed to open command socket: "
+ "unknown error");
+ SetTerminateWanted();
+ break; // this is fatal to listening thread
+ }
+
+ try
+ {
+ // Errors here do not kill the thread,
+ // only the current connection.
// This next section comes from Ben's original function
// Log
- ::syslog(LOG_INFO, "Connection from command socket");
+ BOX_INFO("Connection from command socket");
// Send a header line summarising the configuration
// and current state
- const Configuration &conf(GetConfiguration());
char summary[256];
size_t summarySize = sprintf(summary,
"bbackupd: %d %d %d %d\nstate %d\n",
@@ -327,17 +473,76 @@ void BackupDaemon::RunHelperThread(void)
conf.GetKeyValueInt("MaxUploadWait"),
mState);
- mpCommandSocketInfo->mListeningSocket.Write(summary, summarySize);
- mpCommandSocketInfo->mListeningSocket.Write("ping\n", 5);
+ rSocket.Write(summary, summarySize);
+ rSocket.Write("ping\n", 5);
+
+ // old queued messages are not useful
+ EnterCriticalSection(&mMessageQueueLock);
+ mMessageList.clear();
+ ResetEvent(mhMessageToSendEvent);
+ LeaveCriticalSection(&mMessageQueueLock);
- IOStreamGetLine readLine(mpCommandSocketInfo->mListeningSocket);
+ IOStreamGetLine readLine(rSocket);
std::string command;
- while (mpCommandSocketInfo->mListeningSocket.IsConnected() &&
- readLine.GetLine(command) )
+ while (rSocket.IsConnected() && !IsTerminateWanted())
{
- TRACE1("Receiving command '%s' over "
- "command socket\n", command.c_str());
+ HANDLE handles[2];
+ handles[0] = mhMessageToSendEvent;
+ handles[1] = rSocket.GetReadableEvent();
+
+ BOX_TRACE("Received command '" << command
+ << "' over command socket");
+
+ DWORD result = WaitForMultipleObjects(
+ sizeof(handles)/sizeof(*handles),
+ handles, FALSE, 1000);
+
+ if(result == 0)
+ {
+ ResetEvent(mhMessageToSendEvent);
+
+ EnterCriticalSection(&mMessageQueueLock);
+ try
+ {
+ while (mMessageList.size() > 0)
+ {
+ std::string message = *(mMessageList.begin());
+ mMessageList.erase(mMessageList.begin());
+ printf("Sending '%s' to waiting client... ", message.c_str());
+ message += "\n";
+ rSocket.Write(message.c_str(),
+ message.length());
+
+ printf("done.\n");
+ }
+ }
+ catch (...)
+ {
+ LeaveCriticalSection(&mMessageQueueLock);
+ throw;
+ }
+ LeaveCriticalSection(&mMessageQueueLock);
+ continue;
+ }
+ else if(result == WAIT_TIMEOUT)
+ {
+ continue;
+ }
+ else if(result != 1)
+ {
+ BOX_ERROR("WaitForMultipleObjects returned invalid result " << result);
+ continue;
+ }
+
+ if(!readLine.GetLine(command))
+ {
+ BOX_ERROR("Failed to read line");
+ continue;
+ }
+
+ BOX_INFO("Received command " << command <<
+ " from client");
bool sendOK = false;
bool sendResponse = true;
@@ -356,6 +561,7 @@ void BackupDaemon::RunHelperThread(void)
this->mDoSyncFlagOut = true;
this->mSyncIsForcedOut = false;
sendOK = true;
+ SetEvent(mhCommandReceivedEvent);
}
else if(command == "force-sync")
{
@@ -363,48 +569,65 @@ void BackupDaemon::RunHelperThread(void)
this->mDoSyncFlagOut = true;
this->mSyncIsForcedOut = true;
sendOK = true;
+ SetEvent(mhCommandReceivedEvent);
}
else if(command == "reload")
{
// Reload the configuration
SetReloadConfigWanted();
sendOK = true;
+ SetEvent(mhCommandReceivedEvent);
}
else if(command == "terminate")
{
// Terminate the daemon cleanly
SetTerminateWanted();
sendOK = true;
+ SetEvent(mhCommandReceivedEvent);
+ }
+ else
+ {
+ BOX_ERROR("Received unknown command "
+ "'" << command << "' "
+ "from client");
+ sendResponse = true;
+ sendOK = false;
}
// Send a response back?
- if (sendResponse)
+ if(sendResponse)
{
const char* response = sendOK ? "ok\n" : "error\n";
- mpCommandSocketInfo->mListeningSocket.Write(
+ rSocket.Write(
response, strlen(response));
}
- if (disconnect)
+ if(disconnect)
{
break;
}
-
- this->mReceivedCommandConn = true;
}
- mpCommandSocketInfo->mListeningSocket.Close();
+ rSocket.Close();
}
- catch (BoxException &e)
+ catch(BoxException &e)
+ {
+ BOX_ERROR("Communication error with "
+ "control client: " << e.what());
+ }
+ catch(std::exception &e)
{
- ::syslog(LOG_ERR, "Communication error with "
- "control client: %s", e.what());
+ BOX_ERROR("Internal error in command socket "
+ "thread: " << e.what());
}
- catch (...)
+ catch(...)
{
- ::syslog(LOG_ERR, "Communication error with control client");
+ BOX_ERROR("Communication error with control client");
}
}
+
+ CloseHandle(mhCommandReceivedEvent);
+ CloseHandle(mhMessageToSendEvent);
}
#endif
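
The Windows helper thread above drains a message queue protected by a critical section and is woken by an event object, which it waits on alongside the socket's readable event via WaitForMultipleObjects(). A stripped-down sketch of just that queue/event handshake (hypothetical names; the real loop also reads commands from the command socket in the same wait):

#include <windows.h>
#include <list>
#include <string>

// Hypothetical, stripped-down version of the handshake above: the main
// thread queues a message and signals a manual-reset event; the helper
// thread, once its wait reports the event, resets it and drains the queue
// under the lock.
static CRITICAL_SECTION gQueueLock;
static HANDLE gMessageEvent;
static std::list<std::string> gMessageQueue;

void InitQueue()
{
	InitializeCriticalSection(&gQueueLock);
	gMessageEvent = CreateEvent(NULL, TRUE, FALSE, NULL);	// manual reset
}

void QueueMessage(const std::string& rMessage)	// called from the main thread
{
	EnterCriticalSection(&gQueueLock);
	gMessageQueue.push_back(rMessage);
	LeaveCriticalSection(&gQueueLock);
	SetEvent(gMessageEvent);	// wake the helper thread
}

void DrainMessages()	// called from the helper thread when the event fires
{
	ResetEvent(gMessageEvent);
	EnterCriticalSection(&gQueueLock);
	while(!gMessageQueue.empty())
	{
		std::string message = gMessageQueue.front();
		gMessageQueue.pop_front();
		// ... write message + "\n" to the command socket here ...
	}
	LeaveCriticalSection(&gQueueLock);
}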
@@ -418,35 +641,39 @@ void BackupDaemon::RunHelperThread(void)
// --------------------------------------------------------------------------
void BackupDaemon::Run()
{
-#ifdef WIN32
- // init our own timer for file diff timeouts
- InitTimer();
-
- try
- {
- Run2();
- }
- catch(...)
- {
- FiniTimer();
- throw;
- }
-
- FiniTimer();
-#else // ! WIN32
- // Ignore SIGPIPE (so that if a command connection is broken, the daemon doesn't terminate)
- ::signal(SIGPIPE, SIG_IGN);
-
- // Create a command socket?
- const Configuration &conf(GetConfiguration());
- if(conf.KeyExists("CommandSocket"))
- {
- // Yes, create a local UNIX socket
- mpCommandSocketInfo = new CommandSocketInfo;
- const char *socketName = conf.GetKeyValue("CommandSocket").c_str();
- ::unlink(socketName);
- mpCommandSocketInfo->mListeningSocket.Listen(Socket::TypeUNIX, socketName);
- }
+ // initialise global timer mechanism
+ Timers::Init();
+
+ #ifdef WIN32
+ // Create a thread to handle the named pipe
+ HANDLE hThread;
+ unsigned int dwThreadId;
+
+ hThread = (HANDLE) _beginthreadex(
+ NULL, // default security attributes
+ 0, // use default stack size
+ HelperThread, // thread function
+ this, // argument to thread function
+ 0, // use default creation flags
+ &dwThreadId); // returns the thread identifier
+ #else
+ // Ignore SIGPIPE so that if a command connection is broken,
+ // the daemon doesn't terminate.
+ ::signal(SIGPIPE, SIG_IGN);
+
+ // Create a command socket?
+ const Configuration &conf(GetConfiguration());
+ if(conf.KeyExists("CommandSocket"))
+ {
+ // Yes, create a local UNIX socket
+ mpCommandSocketInfo = new CommandSocketInfo;
+ const char *socketName =
+ conf.GetKeyValue("CommandSocket").c_str();
+ ::unlink(socketName);
+ mpCommandSocketInfo->mListeningSocket.Listen(
+ Socket::TypeUNIX, socketName);
+ }
+ #endif // !WIN32
// Handle things nicely on exceptions
try
@@ -455,31 +682,47 @@ void BackupDaemon::Run()
}
catch(...)
{
+ #ifdef WIN32
+ // Don't delete the socket, as the helper thread
+ // is probably still using it. Let Windows clean
+ // up after us.
+ #else
if(mpCommandSocketInfo != 0)
{
try
{
delete mpCommandSocketInfo;
}
+ catch(std::exception &e)
+ {
+ BOX_WARNING("Internal error while "
+ "closing command socket after "
+ "another exception: " << e.what());
+ }
catch(...)
{
- ::syslog(LOG_WARNING,
- "Error closing command socket "
+ BOX_WARNING("Error closing command socket "
"after exception, ignored.");
}
mpCommandSocketInfo = 0;
}
+ #endif // WIN32
+ Timers::Cleanup();
+
throw;
}
- // Clean up
- if(mpCommandSocketInfo != 0)
- {
- delete mpCommandSocketInfo;
- mpCommandSocketInfo = 0;
- }
-#endif
+ #ifndef WIN32
+ // Clean up
+ if(mpCommandSocketInfo != 0)
+ {
+ delete mpCommandSocketInfo;
+ mpCommandSocketInfo = 0;
+ }
+ #endif
+
+ Timers::Cleanup();
}
// --------------------------------------------------------------------------
@@ -503,18 +746,20 @@ void BackupDaemon::Run2()
// Set up the keys for various things
BackupClientCryptoKeys_Setup(conf.GetKeyValue("KeysFile").c_str());
+ // Setup various timings
+ int maximumDiffingTime = 600;
+ int keepAliveTime = 60;
+
// max diffing time, keep-alive time
if(conf.KeyExists("MaximumDiffingTime"))
{
- BackupClientContext::SetMaximumDiffingTime(conf.GetKeyValueInt("MaximumDiffingTime"));
+ maximumDiffingTime = conf.GetKeyValueInt("MaximumDiffingTime");
}
if(conf.KeyExists("KeepAliveTime"))
{
- BackupClientContext::SetKeepAliveTime(conf.GetKeyValueInt("KeepAliveTime"));
+ keepAliveTime = conf.GetKeyValueInt("KeepAliveTime");
}
- // Setup various timings
-
// How often to connect to the store (approximate)
box_time_t updateStoreInterval = SecondsToBoxTime(conf.GetKeyValueInt("UpdateStoreInterval"));
@@ -542,8 +787,8 @@ void BackupDaemon::Run2()
BackupClientContext::ClientStoreMarker_NotKnown;
// haven't contacted the store yet
- bool deserialised = DeserializeStoreObjectInfo(clientStoreMarker,
- lastSyncTime, nextSyncTime);
+ bool deleteStoreObjectInfoFile = DeserializeStoreObjectInfo(
+ clientStoreMarker, lastSyncTime, nextSyncTime);
// --------------------------------------------------------------------------------------------
@@ -564,38 +809,64 @@ void BackupDaemon::Run2()
box_time_t currentTime;
do
{
- // Need to check the stop run thing here too, so this loop isn't run if we should be stopping
+ // Check whether we should be stopping,
+ // and don't run a sync if so.
if(StopRun()) break;
currentTime = GetCurrentBoxTime();
- // Pause a while, but no more than MAX_SLEEP_TIME seconds (use the conditional because times are unsigned)
- box_time_t requiredDelay = (nextSyncTime < currentTime)?(0):(nextSyncTime - currentTime);
- // If there isn't automatic backup happening, set a long delay. And limit delays at the same time.
- if(!automaticBackup || requiredDelay > SecondsToBoxTime(MAX_SLEEP_TIME))
- requiredDelay = SecondsToBoxTime(MAX_SLEEP_TIME);
+ // Pause a while, but no more than
+ // MAX_SLEEP_TIME seconds (use the conditional
+ // because times are unsigned)
+ box_time_t requiredDelay =
+ (nextSyncTime < currentTime)
+ ? (0)
+ : (nextSyncTime - currentTime);
+
+ // If there isn't automatic backup happening,
+ // set a long delay. And limit delays at the
+ // same time.
+ if(!automaticBackup || requiredDelay >
+ SecondsToBoxTime(MAX_SLEEP_TIME))
+ {
+ requiredDelay = SecondsToBoxTime(
+ MAX_SLEEP_TIME);
+ }
- // Only do the delay if there is a delay required
+ // Only delay if necessary
if(requiredDelay > 0)
{
- // Sleep somehow. There are choices on how this should be done, depending on the state of the control connection
+ // Sleep somehow. There are choices
+ // on how this should be done,
+ // depending on the state of the
+ // control connection
if(mpCommandSocketInfo != 0)
{
- // A command socket exists, so sleep by handling connections with it
- WaitOnCommandSocket(requiredDelay, doSync, doSyncForcedByCommand);
+ // A command socket exists,
+ // so sleep by waiting on it
+ WaitOnCommandSocket(
+ requiredDelay, doSync,
+ doSyncForcedByCommand);
}
else
{
- // No command socket or connection, just do a normal sleep
- time_t sleepSeconds = BoxTimeToSeconds(requiredDelay);
- ::sleep((sleepSeconds <= 0)?1:sleepSeconds);
+ // No command socket or
+ // connection, just do a
+ // normal sleep
+ time_t sleepSeconds =
+ BoxTimeToSeconds(
+ requiredDelay);
+ ::sleep((sleepSeconds <= 0)
+ ? 1
+ : sleepSeconds);
}
}
} while((!automaticBackup || (currentTime < nextSyncTime)) && !doSync && !StopRun());
}
- // Time of sync start, and if it's time for another sync (and we're doing automatic syncs), set the flag
+ // Time of sync start, and if it's time for another sync
+ // (and we're doing automatic syncs), set the flag
box_time_t currentSyncStartTime = GetCurrentBoxTime();
if(automaticBackup && currentSyncStartTime >= nextSyncTime)
{
@@ -609,12 +880,14 @@ void BackupDaemon::Run2()
if(d > 0)
{
// Script has asked for a delay
- nextSyncTime = GetCurrentBoxTime() + SecondsToBoxTime(d);
+ nextSyncTime = GetCurrentBoxTime() +
+ SecondsToBoxTime(d);
doSync = false;
}
}
- // Ready to sync? (but only if we're not supposed to be stopping)
+ // Ready to sync? (but only if we're not supposed
+ // to be stopping)
if(doSync && !StopRun())
{
// Touch a file to record times in filesystem
@@ -628,34 +901,70 @@ void BackupDaemon::Run2()
// Calculate the sync period of files to examine
box_time_t syncPeriodStart = lastSyncTime;
- box_time_t syncPeriodEnd = currentSyncStartTime - minimumFileAge;
+ box_time_t syncPeriodEnd = currentSyncStartTime -
+ minimumFileAge;
+
+ if(syncPeriodStart >= syncPeriodEnd &&
+ syncPeriodStart - syncPeriodEnd < minimumFileAge)
+ {
+ // This can happen if we receive a force-sync
+ // command less than minimumFileAge after
+ // the last sync. Deal with it by moving back
+ // syncPeriodStart, which should not do any
+ // damage.
+ syncPeriodStart = syncPeriodEnd -
+ SecondsToBoxTime(1);
+ }
+
+ if(syncPeriodStart >= syncPeriodEnd)
+ {
+ BOX_ERROR("Invalid (negative) sync period: "
+ "perhaps your clock is going "
+ "backwards (" << syncPeriodStart <<
+ " to " << syncPeriodEnd << ")");
+ THROW_EXCEPTION(ClientException,
+ ClockWentBackwards);
+ }
+
// Check logic
ASSERT(syncPeriodEnd > syncPeriodStart);
// Paranoid check on sync times
if(syncPeriodStart >= syncPeriodEnd) continue;
- // Adjust syncPeriodEnd to emulate snapshot behaviour properly
+ // Adjust syncPeriodEnd to emulate snapshot
+ // behaviour properly
box_time_t syncPeriodEndExtended = syncPeriodEnd;
// Using zero min file age?
if(minimumFileAge == 0)
{
- // Add a year on to the end of the end time, to make sure we sync
- // files which are modified after the scan run started.
- // Of course, they may be eligable to be synced again the next time round,
- // but this should be OK, because the changes only upload should upload no data.
- syncPeriodEndExtended += SecondsToBoxTime((time_t)(356*24*3600));
+ // Add a year on to the end of the end time,
+ // to make sure we sync files which are
+ // modified after the scan run started.
+ // Of course, they may be eligible to be
+ // synced again the next time round,
+ // but this should be OK, because the
+ // changes-only upload should upload no data.
+ syncPeriodEndExtended += SecondsToBoxTime(
+ (time_t)(356*24*3600));
}
// Delete the serialised store object file,
// so that we don't try to reload it after a
// partially completed backup
- if(deserialised && !DeleteStoreObjectInfo())
+ if(deleteStoreObjectInfoFile &&
+ !DeleteStoreObjectInfo())
{
- ::syslog(LOG_ERR, "Failed to delete the "
+ BOX_ERROR("Failed to delete the "
"StoreObjectInfoFile, backup cannot "
"continue safely.");
- continue;
+ THROW_EXCEPTION(ClientException,
+ FailedToDeleteStoreObjectInfoFile);
}
+
+ // In case the backup throws an exception,
+ // we should not try to delete the store info
+ // object file again.
+ deleteStoreObjectInfoFile = false;
// Do sync
bool errorOccurred = false;
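The new window clamping and clock check above can be read as one small function over the sync window. A sketch, reusing the helpers that appear in the diff (box_time_t, SecondsToBoxTime, THROW_EXCEPTION, ClientException); the function name is illustrative.

    // Sketch: normalise the sync window before scanning.
    box_time_t ClampSyncPeriodStart(box_time_t start, box_time_t end,
        box_time_t minimumFileAge)
    {
        if(start >= end && start - end < minimumFileAge)
        {
            // A forced sync arrived less than MinimumFileAge after the
            // previous one: pull the start back so the window is valid.
            return end - SecondsToBoxTime(1);
        }

        if(start >= end)
        {
            // Still inverted by MinimumFileAge or more: the clock
            // must have gone backwards.
            THROW_EXCEPTION(ClientException, ClockWentBackwards);
        }

        return start;
    }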
@@ -666,30 +975,73 @@ void BackupDaemon::Run2()
{
// Set state and log start
SetState(State_Connected);
- ::syslog(LOG_INFO, "Beginning scan of local files");
+ BOX_NOTICE("Beginning scan of local files");
- // Then create a client context object (don't just connect, as this may be unnecessary)
- BackupClientContext clientContext(*this, tlsContext, conf.GetKeyValue("StoreHostname"),
- conf.GetKeyValueInt("AccountNumber"), conf.GetKeyValueBool("ExtendedLogging"));
+ std::string extendedLogFile;
+ if (conf.KeyExists("ExtendedLogFile"))
+ {
+ extendedLogFile = conf.GetKeyValue(
+ "ExtendedLogFile");
+ }
+
+ if (conf.KeyExists("LogAllFileAccess"))
+ {
+ mLogAllFileAccess =
+ conf.GetKeyValueBool(
+ "LogAllFileAccess");
+ }
+
+ // Then create a client context object (don't
+ // just connect, as this may be unnecessary)
+ BackupClientContext clientContext
+ (
+ *this,
+ tlsContext,
+ conf.GetKeyValue("StoreHostname"),
+ conf.GetKeyValueInt("AccountNumber"),
+ conf.GetKeyValueBool("ExtendedLogging"),
+ conf.KeyExists("ExtendedLogFile"),
+ extendedLogFile
+ );
// Set up the sync parameters
- BackupClientDirectoryRecord::SyncParams params(*this, clientContext);
+ BackupClientDirectoryRecord::SyncParams params(
+ *this, *this, clientContext);
params.mSyncPeriodStart = syncPeriodStart;
- params.mSyncPeriodEnd = syncPeriodEndExtended; // use potentially extended end time
+ params.mSyncPeriodEnd = syncPeriodEndExtended;
+ // use potentially extended end time
params.mMaxUploadWait = maxUploadWait;
- params.mFileTrackingSizeThreshold = conf.GetKeyValueInt("FileTrackingSizeThreshold");
- params.mDiffingUploadSizeThreshold = conf.GetKeyValueInt("DiffingUploadSizeThreshold");
- params.mMaxFileTimeInFuture = SecondsToBoxTime(conf.GetKeyValueInt("MaxFileTimeInFuture"));
+ params.mFileTrackingSizeThreshold =
+ conf.GetKeyValueInt(
+ "FileTrackingSizeThreshold");
+ params.mDiffingUploadSizeThreshold =
+ conf.GetKeyValueInt(
+ "DiffingUploadSizeThreshold");
+ params.mMaxFileTimeInFuture =
+ SecondsToBoxTime(
+ conf.GetKeyValueInt(
+ "MaxFileTimeInFuture"));
+ mDeleteRedundantLocationsAfter =
+ conf.GetKeyValueInt(
+ "DeleteRedundantLocationsAfter");
+
+ clientContext.SetMaximumDiffingTime(maximumDiffingTime);
+ clientContext.SetKeepAliveTime(keepAliveTime);
// Set store marker
clientContext.SetClientStoreMarker(clientStoreMarker);
- // Set up the locations, if necessary -- need to do it here so we have a (potential) connection to use
+ // Set up the locations, if necessary --
+ // need to do it here so we have a
+ // (potential) connection to use
if(mLocations.empty())
{
- const Configuration &locations(conf.GetSubConfiguration("BackupLocations"));
+ const Configuration &locations(
+ conf.GetSubConfiguration(
+ "BackupLocations"));
- // Make sure all the directory records are set up
+ // Make sure all the directory records
+ // are set up
SetupLocations(clientContext, locations);
}
@@ -699,17 +1051,29 @@ void BackupDaemon::Run2()
// Delete any unused directories?
DeleteUnusedRootDirEntries(clientContext);
+ // Notify administrator
+ NotifySysadmin(NotifyEvent_BackupStart);
+
// Go through the records, syncing them
- for(std::vector<Location *>::const_iterator i(mLocations.begin()); i != mLocations.end(); ++i)
+ for(std::vector<Location *>::const_iterator
+ i(mLocations.begin());
+ i != mLocations.end(); ++i)
{
- // Set current and new ID map pointers in the context
+ // Set current and new ID map pointers
+ // in the context
clientContext.SetIDMaps(mCurrentIDMaps[(*i)->mIDMapIndex], mNewIDMaps[(*i)->mIDMapIndex]);
- // Set exclude lists (context doesn't take ownership)
- clientContext.SetExcludeLists((*i)->mpExcludeFiles, (*i)->mpExcludeDirs);
+ // Set exclude lists (context doesn't
+ // take ownership)
+ clientContext.SetExcludeLists(
+ (*i)->mpExcludeFiles,
+ (*i)->mpExcludeDirs);
// Sync the directory
- (*i)->mpDirectoryRecord->SyncDirectory(params, BackupProtocolClientListDirectory::RootDirectory, (*i)->mPath);
+ (*i)->mpDirectoryRecord->SyncDirectory(
+ params,
+ BackupProtocolClientListDirectory::RootDirectory,
+ (*i)->mPath);
// Unset exclude lists (just in case)
clientContext.SetExcludeLists(0, 0);
@@ -723,13 +1087,14 @@ void BackupDaemon::Run2()
}
else
{
- // Unset the read error flag, so the error is
- // reported again in the future
+ // Unset the read error flag, so the
+ // error is reported again if it
+ // happens again
mNotificationsSent[NotifyEvent_ReadError] = false;
}
- // Perform any deletions required -- these are delayed until the end
- // to allow renaming to happen neatly.
+ // Perform any deletions required -- these are
+ // delayed until the end to allow renaming to
+ // happen neatly.
clientContext.PerformDeletions();
// Close any open connection
@@ -746,26 +1111,45 @@ void BackupDaemon::Run2()
}
else
{
- // The start time of the next run is the end time of this run
- // This is only done if the storage limit wasn't exceeded (as things won't have been done properly if it was)
+ // The start time of the next run is
+ // the end time of this run.
+ // This is only done if the storage
+ // limit wasn't exceeded (as things
+ // won't have been done properly if
+ // it was)
lastSyncTime = syncPeriodEnd;
- // unflag the storage full notify flag so that next time the store is full, and alert will be sent
+
+ // unflag the storage full notify flag
+ // so that next time the store is full,
+ // an alert will be sent
mNotificationsSent[NotifyEvent_StoreFull] = false;
}
// Calculate when the next sync run should be
- nextSyncTime = currentSyncStartTime + updateStoreInterval + Random::RandomInt(updateStoreInterval >> SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
+ nextSyncTime = currentSyncStartTime +
+ updateStoreInterval +
+ Random::RandomInt(updateStoreInterval >>
+ SYNC_PERIOD_RANDOM_EXTRA_TIME_SHIFT_BY);
// Commit the ID Maps
CommitIDMapsAfterSync();
// Log
- ::syslog(LOG_INFO, "Finished scan of local files");
+ BOX_NOTICE("Finished scan of local files");
+
+ // Notify administrator
+ NotifySysadmin(NotifyEvent_BackupFinish);
// --------------------------------------------------------------------------------------------
- // We had a successful backup, save the store info
- SerializeStoreObjectInfo(clientStoreMarker, lastSyncTime, nextSyncTime);
+ // We had a successful backup, save the store
+ // info. If we save successfully, we must
+ // delete the file next time we start a backup
+
+ deleteStoreObjectInfoFile =
+ SerializeStoreObjectInfo(
+ clientStoreMarker,
+ lastSyncTime, nextSyncTime);
// --------------------------------------------------------------------------------------------
}
@@ -776,17 +1160,31 @@ void BackupDaemon::Run2()
errorCode = e.GetType();
errorSubCode = e.GetSubType();
}
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error during "
+ "backup run: " << e.what());
+ errorOccurred = true;
+ errorString = e.what();
+ }
catch(...)
{
- // TODO: better handling of exceptions here... need to be very careful
+ // TODO: better handling of exceptions here...
+ // need to be very careful
errorOccurred = true;
}
if(errorOccurred)
{
// Is it a berkely db failure?
- bool isBerkelyDbFailure = (errorCode == BackupStoreException::ExceptionType
- && errorSubCode == BackupStoreException::BerkelyDBFailure);
+ bool isBerkelyDbFailure = false;
+
+ if (errorCode == BackupStoreException::ExceptionType
+ && errorSubCode == BackupStoreException::BerkelyDBFailure)
+ {
+ isBerkelyDbFailure = true;
+ }
+
if(isBerkelyDbFailure)
{
// Delete corrupt files
@@ -794,7 +1192,8 @@ void BackupDaemon::Run2()
}
// Clear state data
- syncPeriodStart = 0; // go back to beginning of time
+ syncPeriodStart = 0;
+ // go back to beginning of time
clientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown; // no store marker, so download everything
DeleteAllLocations();
DeleteAllIDMaps();
@@ -802,26 +1201,30 @@ void BackupDaemon::Run2()
// Handle restart?
if(StopRun())
{
- ::syslog(LOG_INFO, "Exception (%d/%d) due to signal", errorCode, errorSubCode);
+ BOX_NOTICE("Exception (" << errorCode
+ << "/" << errorSubCode
+ << ") due to signal");
return;
}
// If the Berkely db files get corrupted, delete them and try again immediately
if(isBerkelyDbFailure)
{
- ::syslog(LOG_ERR, "Berkely db inode map files corrupted, deleting and restarting scan. Renamed files and directories will not be tracked until after this scan.\n");
+ BOX_ERROR("Berkely db inode map files corrupted, deleting and restarting scan. Renamed files and directories will not be tracked until after this scan.");
::sleep(1);
}
else
{
// Not restart/terminate, pause and retry
+ // Notify administrator
+ NotifySysadmin(NotifyEvent_BackupError);
SetState(State_Error);
- ::syslog(LOG_ERR,
- "Exception caught (%s %d/%d), "
- "reset state and waiting "
- "to retry...",
- errorString, errorCode,
- errorSubCode);
+ BOX_ERROR("Exception caught ("
+ << errorString
+ << " " << errorCode
+ << "/" << errorSubCode
+ << "), reset state and "
+ "waiting to retry...");
::sleep(10);
nextSyncTime = currentSyncStartTime +
SecondsToBoxTime(90) +
@@ -832,9 +1235,12 @@ void BackupDaemon::Run2()
}
// Log the stats
- ::syslog(LOG_INFO, "File statistics: total file size uploaded %lld, bytes already on server %lld, encoded size %lld",
- BackupStoreFile::msStats.mBytesInEncodedFiles, BackupStoreFile::msStats.mBytesAlreadyOnServer,
- BackupStoreFile::msStats.mTotalFileStreamSize);
+ BOX_NOTICE("File statistics: total file size uploaded "
+ << BackupStoreFile::msStats.mBytesInEncodedFiles
+ << ", bytes already on server "
+ << BackupStoreFile::msStats.mBytesAlreadyOnServer
+ << ", encoded size "
+ << BackupStoreFile::msStats.mTotalFileStreamSize);
BackupStoreFile::ResetStats();
// Tell anything connected to the command socket
@@ -889,7 +1295,7 @@ int BackupDaemon::UseScriptToSeeIfSyncAllowed()
std::string line;
if(getLine.GetLine(line, true, 30000)) // 30 seconds should be enough
{
- // Got a string, intepret
+ // Got a string, interpret
if(line == "now")
{
// Script says do it now. Obey.
@@ -897,27 +1303,46 @@ int BackupDaemon::UseScriptToSeeIfSyncAllowed()
}
else
{
- // How many seconds to wait?
- waitInSeconds = BoxConvert::Convert<int32_t, const std::string&>(line);
- ::syslog(LOG_INFO, "Delaying sync by %d seconds (SyncAllowScript '%s')", waitInSeconds, conf.GetKeyValue("SyncAllowScript").c_str());
+ try
+ {
+ // How many seconds to wait?
+ waitInSeconds = BoxConvert::Convert<int32_t, const std::string&>(line);
+ }
+ catch(ConversionException &e)
+ {
+ BOX_ERROR("Invalid output "
+ "from SyncAllowScript '"
+ << conf.GetKeyValue("SyncAllowScript")
+ << "': '" << line << "'");
+ throw;
+ }
+
+ BOX_NOTICE("Delaying sync by " << waitInSeconds
+ << " seconds (SyncAllowScript '"
+ << conf.GetKeyValue("SyncAllowScript")
+ << "')");
}
}
- // Wait and then cleanup child process
- int status = 0;
- ::waitpid(pid, &status, 0);
+ }
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error running SyncAllowScript: "
+ << e.what());
}
catch(...)
{
// Ignore any exceptions
// Log that something bad happened
- ::syslog(LOG_ERR, "Error running SyncAllowScript '%s'", conf.GetKeyValue("SyncAllowScript").c_str());
- // Clean up though
- if(pid != 0)
- {
- int status = 0;
- ::waitpid(pid, &status, 0);
- }
+ BOX_ERROR("Error running SyncAllowScript '"
+ << conf.GetKeyValue("SyncAllowScript") << "'");
+ }
+
+ // Wait and then cleanup child process, if any
+ if(pid != 0)
+ {
+ int status = 0;
+ ::waitpid(pid, &status, 0);
}
return waitInSeconds;
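UseScriptToSeeIfSyncAllowed() now interprets the script output defensively and always reaps the child process. The parsing step in isolation, as a sketch: here 0 means "sync now", whereas the real function keeps its own sentinel value; BoxConvert, ConversionException and BOX_ERROR are the helpers already visible in the diff.

    // Sketch: interpret one line of SyncAllowScript output.
    int InterpretSyncAllowLine(const std::string &rLine)
    {
        if(rLine == "now")
        {
            return 0; // sync immediately (sketch convention)
        }

        try
        {
            // Otherwise the line must be a number of seconds to wait.
            return BoxConvert::Convert<int32_t, const std::string&>(rLine);
        }
        catch(ConversionException &e)
        {
            BOX_ERROR("Invalid output from SyncAllowScript: '"
                << rLine << "'");
            throw;
        }
    }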
@@ -937,32 +1362,34 @@ int BackupDaemon::UseScriptToSeeIfSyncAllowed()
void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFlagOut, bool &SyncIsForcedOut)
{
#ifdef WIN32
- // Really could use some interprocess protection, mutex etc
- // any side effect should be too bad???? :)
- DWORD timeout = (DWORD)BoxTimeToMilliSeconds(RequiredDelay);
+ DWORD requiredDelayMs = BoxTimeToMilliSeconds(RequiredDelay);
- while ( this->mReceivedCommandConn == false )
- {
- Sleep(1);
+ DWORD result = WaitForSingleObject(mhCommandReceivedEvent,
+ (DWORD)requiredDelayMs);
- if ( timeout == 0 )
- {
- DoSyncFlagOut = false;
- SyncIsForcedOut = false;
- return;
- }
- timeout--;
+ if(result == WAIT_OBJECT_0)
+ {
+ DoSyncFlagOut = this->mDoSyncFlagOut;
+ SyncIsForcedOut = this->mSyncIsForcedOut;
+ ResetEvent(mhCommandReceivedEvent);
+ }
+ else if(result == WAIT_TIMEOUT)
+ {
+ DoSyncFlagOut = false;
+ SyncIsForcedOut = false;
+ }
+ else
+ {
+ BOX_ERROR("Unexpected result from WaitForSingleObject: "
+ "error " << GetLastError());
}
- this->mReceivedCommandConn = false;
- DoSyncFlagOut = this->mDoSyncFlagOut;
- SyncIsForcedOut = this->mSyncIsForcedOut;
return;
#else // ! WIN32
ASSERT(mpCommandSocketInfo != 0);
if(mpCommandSocketInfo == 0) {::sleep(1); return;} // failure case isn't too bad
- TRACE1("Wait on command socket, delay = %lld\n", RequiredDelay);
+ BOX_TRACE("Wait on command socket, delay = " << RequiredDelay);
try
{
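On Windows the old WaitOnCommandSocket() polled mReceivedCommandConn in 1 ms sleeps; the new code blocks on an event object for the whole delay. The Win32 calls in isolation, assuming a manual-reset event (which the ResetEvent() call above implies) that the helper thread signals with SetEvent() once it has set the sync flags:

    // Sketch of the event wait used above (Win32 only).
    #include <windows.h>

    // Created once, e.g. at daemon start-up:
    //   HANDLE hEvent = CreateEvent(NULL, TRUE /* manual reset */,
    //                               FALSE /* initially unsignalled */, NULL);

    bool WaitForCommand(HANDLE hEvent, DWORD timeoutMs)
    {
        DWORD result = WaitForSingleObject(hEvent, timeoutMs);

        if(result == WAIT_OBJECT_0)
        {
            // A command arrived; clear the manual-reset event so the
            // next wait blocks again.
            ResetEvent(hEvent);
            return true;
        }

        if(result == WAIT_TIMEOUT)
        {
            return false; // no command within the delay
        }

        // Anything else (WAIT_FAILED, WAIT_ABANDONED): treat as no
        // command; the caller can log GetLastError().
        return false;
    }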
@@ -988,7 +1415,7 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
{
#ifdef PLATFORM_CANNOT_FIND_PEER_UID_OF_UNIX_SOCKET
bool uidOK = true;
- ::syslog(LOG_WARNING, "On this platform, no security check can be made on the credientials of peers connecting to the command socket. (bbackupctl)");
+ BOX_WARNING("On this platform, no security check can be made on the credentials of peers connecting to the command socket. (bbackupctl)");
#else
// Security check -- does the process connecting to this socket have
// the same UID as this process?
@@ -1013,14 +1440,14 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
if(!uidOK)
{
// Dump the connection
- ::syslog(LOG_ERR, "Incoming command connection from peer had different user ID than this process, or security check could not be completed.");
+ BOX_ERROR("Incoming command connection from peer had different user ID than this process, or security check could not be completed.");
mpCommandSocketInfo->mpConnectedSocket.reset();
return;
}
else
{
// Log
- ::syslog(LOG_INFO, "Connection from command socket");
+ BOX_INFO("Connection from command socket");
// Send a header line summarising the configuration and current state
const Configuration &conf(GetConfiguration());
@@ -1058,7 +1485,8 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
while(mpCommandSocketInfo->mpGetLine != 0 && !mpCommandSocketInfo->mpGetLine->IsEOF()
&& mpCommandSocketInfo->mpGetLine->GetLine(command, false /* no preprocessing */, timeout))
{
- TRACE1("Receiving command '%s' over command socket\n", command.c_str());
+ BOX_TRACE("Receiving command '" << command
+ << "' over command socket");
bool sendOK = false;
bool sendResponse = true;
@@ -1113,13 +1541,29 @@ void BackupDaemon::WaitOnCommandSocket(box_time_t RequiredDelay, bool &DoSyncFla
CloseCommandConnection();
}
}
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error in command socket thread: "
+ << e.what());
+ // If an error occurs, and there is a connection active, just close that
+ // connection and continue. Otherwise, let the error propagate.
+ if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
+ {
+ throw; // thread will die
+ }
+ else
+ {
+ // Close socket and ignore error
+ CloseCommandConnection();
+ }
+ }
catch(...)
{
// If an error occurs, and there is a connection active, just close that
// connection and continue. Otherwise, let the error propagate.
if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
{
- throw;
+ throw; // thread will die
}
else
{
@@ -1144,7 +1588,7 @@ void BackupDaemon::CloseCommandConnection()
#ifndef WIN32
try
{
- TRACE0("Closing command connection\n");
+ BOX_TRACE("Closing command connection");
if(mpCommandSocketInfo->mpGetLine)
{
@@ -1153,6 +1597,11 @@ void BackupDaemon::CloseCommandConnection()
}
mpCommandSocketInfo->mpConnectedSocket.reset();
}
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error while closing command "
+ "socket: " << e.what());
+ }
catch(...)
{
// Ignore any errors
@@ -1172,14 +1621,10 @@ void BackupDaemon::CloseCommandConnection()
// --------------------------------------------------------------------------
void BackupDaemon::SendSyncStartOrFinish(bool SendStart)
{
- // The bbackupctl program can't rely on a state change, because it may never
- // change if the server doesn't need to be contacted.
+ // The bbackupctl program can't rely on a state change, because it
+ // may never change if the server doesn't need to be contacted.
-#ifdef __MINGW32__
-#warning race condition: what happens if socket is closed?
-#endif
-
- if (mpCommandSocketInfo != NULL &&
+ if(mpCommandSocketInfo != NULL &&
#ifdef WIN32
mpCommandSocketInfo->mListeningSocket.IsConnected()
#else
@@ -1187,17 +1632,26 @@ void BackupDaemon::SendSyncStartOrFinish(bool SendStart)
#endif
)
{
- const char* message = SendStart ? "start-sync\n" : "finish-sync\n";
+ std::string message = SendStart ? "start-sync" : "finish-sync";
try
{
#ifdef WIN32
- mpCommandSocketInfo->mListeningSocket.Write(message,
- (int)strlen(message));
+ EnterCriticalSection(&mMessageQueueLock);
+ mMessageList.push_back(message);
+ SetEvent(mhMessageToSendEvent);
+ LeaveCriticalSection(&mMessageQueueLock);
#else
- mpCommandSocketInfo->mpConnectedSocket->Write(message,
- strlen(message));
+ message += "\n";
+ mpCommandSocketInfo->mpConnectedSocket->Write(
+ message.c_str(), message.size());
#endif
}
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error while sending to "
+ "command socket client: " << e.what());
+ CloseCommandConnection();
+ }
catch(...)
{
CloseCommandConnection();
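On Windows, SendSyncStartOrFinish() (and SetState() in a later hunk) no longer write to the pipe directly: they queue the text under a critical section and signal an event, leaving delivery to a single writer. The producer half, reduced to a sketch; the consumer thread is not shown in this excerpt and is assumed to drain the list after each event.

    // Sketch of the queued-message producer used on Win32.
    #include <windows.h>
    #include <list>
    #include <string>

    CRITICAL_SECTION gQueueLock;      // InitializeCriticalSection() at start-up
    std::list<std::string> gMessages; // pending lines for the command pipe
    HANDLE gMessageEvent;             // CreateEvent(NULL, TRUE, FALSE, NULL)

    void QueueMessage(const std::string &rMessage)
    {
        EnterCriticalSection(&gQueueLock);
        gMessages.push_back(rMessage);
        SetEvent(gMessageEvent);      // wake the writer thread
        LeaveCriticalSection(&gQueueLock);
    }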
@@ -1298,7 +1752,7 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
struct mntent *entry = 0;
while((entry = ::getmntent(mountPointsFile)) != 0)
{
- TRACE1("Found mount point at %s\n", entry->mnt_dir);
+ BOX_TRACE("Found mount point at " << entry->mnt_dir);
mountPoints.insert(std::string(entry->mnt_dir));
}
@@ -1307,7 +1761,7 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
}
catch(...)
{
- ::endmntent(mountPointsFile);
+ ::endmntent(mountPointsFile);
throw;
}
#else // ! HAVE_STRUCT_MNTENT_MNT_DIR
@@ -1320,12 +1774,11 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
try
{
-
// Read all the entries, and put them in the set
struct mnttab entry;
while(getmntent(mountPointsFile, &entry) == 0)
{
- TRACE1("Found mount point at %s\n", entry.mnt_mountp);
+ BOX_TRACE("Found mount point at " << entry.mnt_mountp);
mountPoints.insert(std::string(entry.mnt_mountp));
}
@@ -1354,19 +1807,37 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
for(std::list<std::pair<std::string, Configuration> >::const_iterator i = rLocationsConf.mSubConfigurations.begin();
i != rLocationsConf.mSubConfigurations.end(); ++i)
{
-TRACE0("new location\n");
+ BOX_TRACE("new location: " << i->first);
// Create a record for it
- Location *ploc = new Location;
+ std::auto_ptr<Location> apLoc(new Location);
+
try
{
// Setup names in the location record
- ploc->mName = i->first;
- ploc->mPath = i->second.GetKeyValue("Path");
+ apLoc->mName = i->first;
+ apLoc->mPath = i->second.GetKeyValue("Path");
// Read the exclude lists from the Configuration
- ploc->mpExcludeFiles = BackupClientMakeExcludeList_Files(i->second);
- ploc->mpExcludeDirs = BackupClientMakeExcludeList_Dirs(i->second);
-
+ apLoc->mpExcludeFiles = BackupClientMakeExcludeList_Files(i->second);
+ apLoc->mpExcludeDirs = BackupClientMakeExcludeList_Dirs(i->second);
+
+ // Does this exist on the server?
+ // Remove from dir object early, so that if we fail
+ // to stat the local directory, we still don't
+ // consider to remote one for deletion.
+ BackupStoreDirectory::Iterator iter(dir);
+ BackupStoreFilenameClear dirname(apLoc->mName); // generate the filename
+ BackupStoreDirectory::Entry *en = iter.FindMatchingClearName(dirname);
+ int64_t oid = 0;
+ if(en != 0)
+ {
+ oid = en->GetObjectID();
+
+ // Delete the entry from the directory, so we get a list of
+ // unused root directories at the end of this.
+ dir.DeleteEntry(oid);
+ }
+
// Do a fsstat on the pathname to find out which mount it's on
{
@@ -1375,13 +1846,18 @@ TRACE0("new location\n");
// BSD style statfs -- includes mount point, which is nice.
#ifdef HAVE_STRUCT_STATVFS_F_MNTONNAME
struct statvfs s;
- if(::statvfs(ploc->mPath.c_str(), &s) != 0)
+ if(::statvfs(apLoc->mPath.c_str(), &s) != 0)
#else // HAVE_STRUCT_STATVFS_F_MNTONNAME
struct statfs s;
- if(::statfs(ploc->mPath.c_str(), &s) != 0)
+ if(::statfs(apLoc->mPath.c_str(), &s) != 0)
#endif // HAVE_STRUCT_STATVFS_F_MNTONNAME
{
- THROW_EXCEPTION(CommonException, OSFileError)
+ BOX_WARNING("Failed to stat location "
+ "path '" << apLoc->mPath <<
+ "' (" << strerror(errno) <<
+ "), skipping location '" <<
+ apLoc->mName << "'");
+ continue;
}
// Where the filesystem is mounted
@@ -1390,29 +1866,34 @@ TRACE0("new location\n");
#else // !HAVE_STRUCT_STATFS_F_MNTONNAME && !WIN32
// Warn in logs if the directory isn't absolute
- if(ploc->mPath[0] != '/')
+ if(apLoc->mPath[0] != '/')
{
- ::syslog(LOG_ERR, "Location path '%s' isn't absolute", ploc->mPath.c_str());
+ BOX_WARNING("Location path '"
+ << apLoc->mPath
+ << "' is not absolute");
}
// Go through the mount points found, and find a suitable one
std::string mountName("/");
{
std::set<std::string, mntLenCompare>::const_iterator i(mountPoints.begin());
- TRACE1("%d potential mount points\n", mountPoints.size());
+ BOX_TRACE(mountPoints.size()
+ << " potential mount points");
for(; i != mountPoints.end(); ++i)
{
// Compare first n characters with the filename
// If it matches, the file belongs in that mount point
// (sorting order ensures this)
- TRACE1("checking against mount point %s\n", i->c_str());
- if(::strncmp(i->c_str(), ploc->mPath.c_str(), i->size()) == 0)
+ BOX_TRACE("checking against mount point " << *i);
+ if(::strncmp(i->c_str(), apLoc->mPath.c_str(), i->size()) == 0)
{
// Match
mountName = *i;
break;
}
}
- TRACE2("mount point chosen for %s is %s\n", ploc->mPath.c_str(), mountName.c_str());
+ BOX_TRACE("mount point chosen for "
+ << apLoc->mPath << " is "
+ << mountName);
}
#endif
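The mount-point loop above depends on the ordering of the std::set (the mntLenCompare comparator, which appears to put longer names first) so that the first prefix match is the most specific one. Stated without the ordering trick, the intent is a longest-prefix match over the known mount points; a sketch with the same prefix semantics as the strncmp() call above:

    // Sketch: choose the mount point that is the longest prefix of rPath.
    #include <set>
    #include <string>

    std::string FindMountPoint(const std::set<std::string> &rMountPoints,
        const std::string &rPath)
    {
        std::string best("/");
        for(std::set<std::string>::const_iterator i = rMountPoints.begin();
            i != rMountPoints.end(); ++i)
        {
            if(i->size() > best.size() &&
                rPath.compare(0, i->size(), *i) == 0)
            {
                best = *i;
            }
        }
        return best;
    }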
@@ -1422,12 +1903,12 @@ TRACE0("new location\n");
if(f != mounts.end())
{
// Yes -- store the index
- ploc->mIDMapIndex = f->second;
+ apLoc->mIDMapIndex = f->second;
}
else
{
// No -- new index
- ploc->mIDMapIndex = numIDMaps;
+ apLoc->mIDMapIndex = numIDMaps;
mounts[mountName] = numIDMaps;
// Store the mount name
@@ -1439,49 +1920,73 @@ TRACE0("new location\n");
}
// Does this exist on the server?
- BackupStoreDirectory::Iterator iter(dir);
- BackupStoreFilenameClear dirname(ploc->mName); // generate the filename
- BackupStoreDirectory::Entry *en = iter.FindMatchingClearName(dirname);
- int64_t oid = 0;
- if(en != 0)
- {
- oid = en->GetObjectID();
-
- // Delete the entry from the directory, so we get a list of
- // unused root directories at the end of this.
- dir.DeleteEntry(oid);
- }
- else
+ if(en == 0)
{
// Doesn't exist, so it has to be created on the server. Let's go!
// First, get the directory's attributes and modification time
box_time_t attrModTime = 0;
BackupClientFileAttributes attr;
- attr.ReadAttributes(ploc->mPath.c_str(), true /* directories have zero mod times */,
- 0 /* not interested in mod time */, &attrModTime /* get the attribute modification time */);
+ try
+ {
+ attr.ReadAttributes(apLoc->mPath.c_str(),
+ true /* directories have zero mod times */,
+ 0 /* not interested in mod time */,
+ &attrModTime /* get the attribute modification time */);
+ }
+ catch (BoxException &e)
+ {
+ BOX_ERROR("Failed to get attributes "
+ "for path '" << apLoc->mPath
+ << "', skipping location '" <<
+ apLoc->mName << "'");
+ continue;
+ }
// Execute create directory command
- MemBlockStream attrStream(attr);
- std::auto_ptr<BackupProtocolClientSuccess> dirCreate(connection.QueryCreateDirectory(
- BackupProtocolClientListDirectory::RootDirectory,
- attrModTime, dirname, attrStream));
-
- // Object ID for later creation
- oid = dirCreate->GetObjectID();
+ try
+ {
+ MemBlockStream attrStream(attr);
+ std::auto_ptr<BackupProtocolClientSuccess>
+ dirCreate(connection.QueryCreateDirectory(
+ BackupProtocolClientListDirectory::RootDirectory,
+ attrModTime, dirname, attrStream));
+
+ // Object ID for later creation
+ oid = dirCreate->GetObjectID();
+ }
+ catch (BoxException &e)
+ {
+ BOX_ERROR("Failed to create remote "
+ "directory '/" << apLoc->mName <<
+ "', skipping location '" <<
+ apLoc->mName << "'");
+ continue;
+ }
+
}
// Create and store the directory object for the root of this location
ASSERT(oid != 0);
BackupClientDirectoryRecord *precord = new BackupClientDirectoryRecord(oid, i->first);
- ploc->mpDirectoryRecord.reset(precord);
+ apLoc->mpDirectoryRecord.reset(precord);
// Push it back on the vector of locations
- mLocations.push_back(ploc);
+ mLocations.push_back(apLoc.release());
+ }
+ catch (std::exception &e)
+ {
+ BOX_ERROR("Failed to configure location '"
+ << apLoc->mName << "' path '"
+ << apLoc->mPath << "': " << e.what() <<
+ ": please check for previous errors");
+ throw;
}
catch(...)
{
- delete ploc;
- ploc = 0;
+ BOX_ERROR("Failed to configure location '"
+ << apLoc->mName << "' path '"
+ << apLoc->mPath << "': please check for "
+ "previous errors");
throw;
}
}
@@ -1489,8 +1994,26 @@ TRACE0("new location\n");
// Any entries in the root directory which need deleting?
if(dir.GetNumberOfEntries() > 0)
{
- ::syslog(LOG_INFO, "%d redundant locations in root directory found, will delete from store after %d seconds.",
- dir.GetNumberOfEntries(), BACKUP_DELETE_UNUSED_ROOT_ENTRIES_AFTER);
+ box_time_t now = GetCurrentBoxTime();
+
+ // This should reset the timer if the list of unused
+ // locations changes, but it will not if the number of
+ // unused locations does not change, but the locations
+ // do change, e.g. one mysteriously appears and another
+ // mysteriously disappears. (FIXME)
+ if (dir.GetNumberOfEntries() != mUnusedRootDirEntries.size() ||
+ mDeleteUnusedRootDirEntriesAfter == 0)
+ {
+ mDeleteUnusedRootDirEntriesAfter = now +
+ SecondsToBoxTime(mDeleteRedundantLocationsAfter);
+ }
+
+ int secs = BoxTimeToSeconds(mDeleteUnusedRootDirEntriesAfter
+ - now);
+
+ BOX_NOTICE(dir.GetNumberOfEntries() << " redundant locations "
+ "in root directory found, will delete from store "
+ "after " << secs << " seconds.");
// Store directories in list of things to delete
mUnusedRootDirEntries.clear();
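The deletion delay for redundant root entries is now configurable (DeleteRedundantLocationsAfter) and the deadline is only re-armed when the number of unused entries changes, as the FIXME above notes; DeleteUnusedRootDirEntries(), in a later hunk, fires once that deadline has passed. The arm/fire behaviour as a tiny standalone helper (illustrative only; the daemon keeps these values as member variables):

    // Sketch of the delayed-deletion timer.
    #include <stddef.h>
    #include <stdint.h>

    struct DelayedDeleteTimer
    {
        uint64_t mDeadline;   // 0 = not armed
        size_t   mLastCount;

        DelayedDeleteTimer() : mDeadline(0), mLastCount(0) { }

        // Call each time the set of unused entries has been recomputed.
        void Arm(size_t count, uint64_t now, uint64_t delay)
        {
            if(count != mLastCount || mDeadline == 0)
            {
                mDeadline = now + delay;
            }
            mLastCount = count;
        }

        // True once there is work to do and the deadline has passed.
        bool ShouldFire(uint64_t now) const
        {
            return mLastCount > 0 && mDeadline != 0 && now >= mDeadline;
        }
    };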
@@ -1501,14 +2024,13 @@ TRACE0("new location\n");
// Add name to list
BackupStoreFilenameClear clear(en->GetName());
const std::string &name(clear.GetClearFilename());
- mUnusedRootDirEntries.push_back(std::pair<int64_t,std::string>(en->GetObjectID(), name));
+ mUnusedRootDirEntries.push_back(
+ std::pair<int64_t,std::string>
+ (en->GetObjectID(), name));
// Log this
- ::syslog(LOG_INFO, "Unused location in root: %s", name.c_str());
+ BOX_INFO("Unused location in root: " << name);
}
ASSERT(mUnusedRootDirEntries.size() > 0);
- // Time to delete them
- mDeleteUnusedRootDirEntriesAfter =
- GetCurrentBoxTime() + SecondsToBoxTime(BACKUP_DELETE_UNUSED_ROOT_ENTRIES_AFTER);
}
}
@@ -1620,14 +2142,14 @@ void BackupDaemon::DeleteCorruptBerkelyDbFiles()
MakeMapBaseName(l, filename);
// Delete the file
- TRACE1("Deleting %s\n", filename.c_str());
+ BOX_TRACE("Deleting " << filename);
::unlink(filename.c_str());
// Add a suffix for the new map
filename += ".n";
// Delete that too
- TRACE1("Deleting %s\n", filename.c_str());
+ BOX_TRACE("Deleting " << filename);
::unlink(filename.c_str());
}
}
@@ -1707,6 +2229,9 @@ void BackupDaemon::CommitIDMapsAfterSync()
#endif
if(::rename(newmap.c_str(), target.c_str()) != 0)
{
+ BOX_ERROR("failed to rename ID map: " << newmap
+ << " to " << target << ": "
+ << strerror(errno));
THROW_EXCEPTION(CommonException, OSFileError)
}
}
@@ -1791,39 +2316,44 @@ void BackupDaemon::SetState(int State)
// command socket if there's an error
char newState[64];
- char newStateSize = sprintf(newState, "state %d\n", State);
+ sprintf(newState, "state %d", State);
+ std::string message = newState;
#ifdef WIN32
- #ifndef _MSC_VER
- #warning FIX ME: race condition
- #endif
+ EnterCriticalSection(&mMessageQueueLock);
+ mMessageList.push_back(newState);
+ SetEvent(mhMessageToSendEvent);
+ LeaveCriticalSection(&mMessageQueueLock);
+#else
+ message += "\n";
- // what happens if the socket is closed by the other thread before
- // we can write to it? Null pointer deref at best.
- if (mpCommandSocketInfo &&
- mpCommandSocketInfo->mListeningSocket.IsConnected())
+ if(mpCommandSocketInfo == 0)
{
- try
- {
- mpCommandSocketInfo->mListeningSocket.Write(newState, newStateSize);
- }
- catch(...)
- {
- CloseCommandConnection();
- }
+ return;
}
-#else
- if(mpCommandSocketInfo != 0 && mpCommandSocketInfo->mpConnectedSocket.get() != 0)
+
+ if(mpCommandSocketInfo->mpConnectedSocket.get() == 0)
{
- // Something connected to the command socket, tell it about the new state
- try
- {
- mpCommandSocketInfo->mpConnectedSocket->Write(newState, newStateSize);
- }
- catch(...)
- {
- CloseCommandConnection();
- }
+ return;
+ }
+
+ // Something connected to the command socket, tell it about the new state
+ try
+ {
+ mpCommandSocketInfo->mpConnectedSocket->Write(message.c_str(),
+ message.length());
+ }
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error while writing state "
+ "to command socket: " << e.what());
+ CloseCommandConnection();
+ }
+ catch(...)
+ {
+ BOX_ERROR("Internal error while writing state "
+ "to command socket: unknown error");
+ CloseCommandConnection();
}
#endif
}
@@ -1854,49 +2384,80 @@ void BackupDaemon::TouchFileInWorkingDir(const char *Filename)
//
// Function
// Name: BackupDaemon::NotifySysadmin(int)
-// Purpose: Run the script to tell the sysadmin about events which need attention.
+// Purpose: Run the script to tell the sysadmin about events
+// which need attention.
// Created: 25/2/04
//
// --------------------------------------------------------------------------
void BackupDaemon::NotifySysadmin(int Event)
{
- static const char *sEventNames[] = {"store-full", "read-error", 0};
+ static const char *sEventNames[] =
+ {
+ "store-full",
+ "read-error",
+ "backup-error",
+ "backup-start",
+ "backup-finish",
+ 0
+ };
+
+ BOX_TRACE("sizeof(sEventNames) == " << sizeof(sEventNames));
+ BOX_TRACE("sizeof(*sEventNames) == " << sizeof(*sEventNames));
+ BOX_TRACE("NotifyEvent__MAX == " << NotifyEvent__MAX);
+ ASSERT((sizeof(sEventNames)/sizeof(*sEventNames)) == NotifyEvent__MAX + 1);
- TRACE1("BackupDaemon::NotifySysadmin() called, event = %d\n", Event);
+ BOX_TRACE("BackupDaemon::NotifySysadmin() called, event = " <<
+ sEventNames[Event]);
- if(Event < 0 || Event > NotifyEvent__MAX)
+ if(Event < 0 || Event >= NotifyEvent__MAX)
{
- THROW_EXCEPTION(BackupStoreException, BadNotifySysadminEventCode);
+ THROW_EXCEPTION(BackupStoreException,
+ BadNotifySysadminEventCode);
}
// Don't send lots of repeated messages
- if(mNotificationsSent[Event])
+ if(mNotificationsSent[Event] &&
+ Event != NotifyEvent_BackupStart &&
+ Event != NotifyEvent_BackupFinish)
{
+ BOX_WARNING("Suppressing duplicate notification about " <<
+ sEventNames[Event]);
return;
}
- // Is there a notifation script?
+ // Is there a notification script?
const Configuration &conf(GetConfiguration());
if(!conf.KeyExists("NotifyScript"))
{
// Log, and then return
- ::syslog(LOG_ERR, "Not notifying administrator about event %s -- set NotifyScript to do this in future", sEventNames[Event]);
+ if(Event != NotifyEvent_BackupStart &&
+ Event != NotifyEvent_BackupFinish)
+ {
+ BOX_ERROR("Not notifying administrator about event "
+ << sEventNames[Event] << " -- set NotifyScript "
+ "to do this in future");
+ }
return;
}
// Script to run
- std::string script(conf.GetKeyValue("NotifyScript") + ' ' + sEventNames[Event]);
+ std::string script(conf.GetKeyValue("NotifyScript") + ' ' +
+ sEventNames[Event]);
// Log what we're about to do
- ::syslog(LOG_INFO, "About to notify administrator about event %s, running script '%s'", sEventNames[Event], script.c_str());
+ BOX_NOTICE("About to notify administrator about event "
+ << sEventNames[Event] << ", running script '"
+ << script << "'");
// Then do it
if(::system(script.c_str()) != 0)
{
- ::syslog(LOG_ERR, "Notify script returned an error code. ('%s')", script.c_str());
+ BOX_ERROR("Notify script returned an error code. ('"
+ << script << "')");
}
- // Flag that this is done so the administrator isn't constantly bombarded with lots of errors
+ // Flag that this is done so the administrator isn't constantly
+ // bombarded with lots of errors
mNotificationsSent[Event] = true;
}
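NotifySysadmin() now carries its own name table and a runtime ASSERT that the table length equals NotifyEvent__MAX + 1 (the extra slot is the terminating null pointer). A compile-time check would catch a mismatch even in release builds; the following is a sketch of that variant in C++03 style, using the same table, and is not what the diff does (it relies on the ASSERT).

    // Sketch: compile-time size check for the event-name table.
    static const char *sEventNames[] =
    {
        "store-full",
        "read-error",
        "backup-error",
        "backup-start",
        "backup-finish",
        0
    };

    // An array of negative size is ill-formed, so this line fails to
    // compile if the table and the NotifyEvent_* enum drift apart.
    typedef char EventTableSizeCheck[
        (sizeof(sEventNames) / sizeof(*sEventNames) == NotifyEvent__MAX + 1)
        ? 1 : -1];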
@@ -1911,28 +2472,40 @@ void BackupDaemon::NotifySysadmin(int Event)
// --------------------------------------------------------------------------
void BackupDaemon::DeleteUnusedRootDirEntries(BackupClientContext &rContext)
{
- if(mUnusedRootDirEntries.empty() || mDeleteUnusedRootDirEntriesAfter == 0)
+ if(mUnusedRootDirEntries.empty())
{
- // Nothing to do.
+ BOX_INFO("Not deleting unused entries - none in list");
return;
}
+ if(mDeleteUnusedRootDirEntriesAfter == 0)
+ {
+ BOX_INFO("Not deleting unused entries - "
+ "zero delete time (bad)");
+ return;
+ }
+
// Check time
- if(GetCurrentBoxTime() < mDeleteUnusedRootDirEntriesAfter)
+ box_time_t now = GetCurrentBoxTime();
+ if(now < mDeleteUnusedRootDirEntriesAfter)
{
- // Too early to delete files
+ int secs = BoxTimeToSeconds(mDeleteUnusedRootDirEntriesAfter
+ - now);
+ BOX_INFO("Not deleting unused entries - too early ("
+ << secs << " seconds remaining)");
return;
}
// Entries to delete, and it's the right time to do so...
- ::syslog(LOG_INFO, "Deleting unused locations from store root...");
+ BOX_NOTICE("Deleting unused locations from store root...");
BackupProtocolClient &connection(rContext.GetConnection());
for(std::vector<std::pair<int64_t,std::string> >::iterator i(mUnusedRootDirEntries.begin()); i != mUnusedRootDirEntries.end(); ++i)
{
connection.QueryDeleteDirectory(i->first);
// Log this
- ::syslog(LOG_INFO, "Deleted %s (ID %08llx) from store root", i->second.c_str(), i->first);
+ BOX_NOTICE("Deleted " << i->second << " (ID " << i->first
+ << ") from store root");
}
// Reset state
@@ -2006,12 +2579,12 @@ void BackupDaemon::Location::Deserialize(Archive &rArchive)
//
//
mpDirectoryRecord.reset(NULL);
- if (mpExcludeFiles)
+ if(mpExcludeFiles)
{
delete mpExcludeFiles;
mpExcludeFiles = NULL;
}
- if (mpExcludeDirs)
+ if(mpExcludeDirs)
{
delete mpExcludeDirs;
mpExcludeDirs = NULL;
@@ -2030,15 +2603,17 @@ void BackupDaemon::Location::Deserialize(Archive &rArchive)
int64_t aMagicMarker = 0;
rArchive.Read(aMagicMarker);
- if (aMagicMarker == ARCHIVE_MAGIC_VALUE_NOOP)
+ if(aMagicMarker == ARCHIVE_MAGIC_VALUE_NOOP)
{
// NOOP
}
- else if (aMagicMarker == ARCHIVE_MAGIC_VALUE_RECURSE)
+ else if(aMagicMarker == ARCHIVE_MAGIC_VALUE_RECURSE)
{
BackupClientDirectoryRecord *pSubRecord = new BackupClientDirectoryRecord(0, "");
- if (!pSubRecord)
+ if(!pSubRecord)
+ {
throw std::bad_alloc();
+ }
mpDirectoryRecord.reset(pSubRecord);
mpDirectoryRecord->Deserialize(rArchive);
@@ -2046,7 +2621,7 @@ void BackupDaemon::Location::Deserialize(Archive &rArchive)
else
{
// there is something going on here
- THROW_EXCEPTION(CommonException, Internal)
+ THROW_EXCEPTION(ClientException, CorruptStoreObjectInfoFile);
}
//
@@ -2054,22 +2629,24 @@ void BackupDaemon::Location::Deserialize(Archive &rArchive)
//
rArchive.Read(aMagicMarker);
- if (aMagicMarker == ARCHIVE_MAGIC_VALUE_NOOP)
+ if(aMagicMarker == ARCHIVE_MAGIC_VALUE_NOOP)
{
// NOOP
}
- else if (aMagicMarker == ARCHIVE_MAGIC_VALUE_RECURSE)
+ else if(aMagicMarker == ARCHIVE_MAGIC_VALUE_RECURSE)
{
mpExcludeFiles = new ExcludeList;
- if (!mpExcludeFiles)
+ if(!mpExcludeFiles)
+ {
throw std::bad_alloc();
+ }
mpExcludeFiles->Deserialize(rArchive);
}
else
{
// there is something going on here
- THROW_EXCEPTION(CommonException, Internal)
+ THROW_EXCEPTION(ClientException, CorruptStoreObjectInfoFile);
}
//
@@ -2077,22 +2654,24 @@ void BackupDaemon::Location::Deserialize(Archive &rArchive)
//
rArchive.Read(aMagicMarker);
- if (aMagicMarker == ARCHIVE_MAGIC_VALUE_NOOP)
+ if(aMagicMarker == ARCHIVE_MAGIC_VALUE_NOOP)
{
// NOOP
}
- else if (aMagicMarker == ARCHIVE_MAGIC_VALUE_RECURSE)
+ else if(aMagicMarker == ARCHIVE_MAGIC_VALUE_RECURSE)
{
mpExcludeDirs = new ExcludeList;
- if (!mpExcludeDirs)
+ if(!mpExcludeDirs)
+ {
throw std::bad_alloc();
+ }
mpExcludeDirs->Deserialize(rArchive);
}
else
{
// there is something going on here
- THROW_EXCEPTION(CommonException, Internal)
+ THROW_EXCEPTION(ClientException, CorruptStoreObjectInfoFile);
}
}
@@ -2117,7 +2696,7 @@ void BackupDaemon::Location::Serialize(Archive & rArchive) const
//
//
//
- if (mpDirectoryRecord.get() == NULL)
+ if(mpDirectoryRecord.get() == NULL)
{
int64_t aMagicMarker = ARCHIVE_MAGIC_VALUE_NOOP;
rArchive.Write(aMagicMarker);
@@ -2133,7 +2712,7 @@ void BackupDaemon::Location::Serialize(Archive & rArchive) const
//
//
//
- if (!mpExcludeFiles)
+ if(!mpExcludeFiles)
{
int64_t aMagicMarker = ARCHIVE_MAGIC_VALUE_NOOP;
rArchive.Write(aMagicMarker);
@@ -2149,7 +2728,7 @@ void BackupDaemon::Location::Serialize(Archive & rArchive) const
//
//
//
- if (!mpExcludeDirs)
+ if(!mpExcludeDirs)
{
int64_t aMagicMarker = ARCHIVE_MAGIC_VALUE_NOOP;
rArchive.Write(aMagicMarker);
@@ -2206,27 +2785,31 @@ BackupDaemon::CommandSocketInfo::~CommandSocketInfo()
static const int STOREOBJECTINFO_MAGIC_ID_VALUE = 0x7777525F;
static const std::string STOREOBJECTINFO_MAGIC_ID_STRING = "BBACKUPD-STATE";
-static const int STOREOBJECTINFO_VERSION = 1;
+static const int STOREOBJECTINFO_VERSION = 2;
-void BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time_t theLastSyncTime, box_time_t theNextSyncTime) const
+bool BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time_t theLastSyncTime, box_time_t theNextSyncTime) const
{
if(!GetConfiguration().KeyExists("StoreObjectInfoFile"))
{
- return;
+ return false;
}
std::string StoreObjectInfoFile =
GetConfiguration().GetKeyValue("StoreObjectInfoFile");
- if (StoreObjectInfoFile.size() <= 0)
+ if(StoreObjectInfoFile.size() <= 0)
{
- return;
+ return false;
}
+ bool created = false;
+
try
{
FileStream aFile(StoreObjectInfoFile.c_str(),
O_WRONLY | O_CREAT | O_TRUNC);
+ created = true;
+
Archive anArchive(aFile, 0);
anArchive.Write(STOREOBJECTINFO_MAGIC_ID_VALUE);
@@ -2243,7 +2826,7 @@ void BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
int64_t iCount = mLocations.size();
anArchive.Write(iCount);
- for (int v = 0; v < iCount; v++)
+ for(int v = 0; v < iCount; v++)
{
ASSERT(mLocations[v]);
mLocations[v]->Serialize(anArchive);
@@ -2255,22 +2838,48 @@ void BackupDaemon::SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time
iCount = mIDMapMounts.size();
anArchive.Write(iCount);
- for (int v = 0; v < iCount; v++)
+ for(int v = 0; v < iCount; v++)
anArchive.Write(mIDMapMounts[v]);
//
//
//
+ iCount = mUnusedRootDirEntries.size();
+ anArchive.Write(iCount);
+
+ for(int v = 0; v < iCount; v++)
+ {
+ anArchive.Write(mUnusedRootDirEntries[v].first);
+ anArchive.Write(mUnusedRootDirEntries[v].second);
+ }
+
+ if (iCount > 0)
+ {
+ anArchive.Write(mDeleteUnusedRootDirEntriesAfter);
+ }
+
+ //
+ //
+ //
aFile.Close();
- ::syslog(LOG_INFO, "Saved store object info file '%s'",
- StoreObjectInfoFile.c_str());
+ BOX_INFO("Saved store object info file version " <<
+ STOREOBJECTINFO_VERSION << " (" <<
+ StoreObjectInfoFile << ")");
}
- catch (...)
+ catch(std::exception &e)
{
- ::syslog(LOG_WARNING, "Requested store object info file '%s' "
- "not accessible or could not be created",
- StoreObjectInfoFile.c_str());
+ BOX_ERROR("Internal error writing store object "
+ "info file (" << StoreObjectInfoFile << "): "
+ << e.what());
}
+ catch(...)
+ {
+ BOX_ERROR("Internal error writing store object "
+ "info file (" << StoreObjectInfoFile << "): "
+ "unknown error");
+ }
+
+ return created;
}
// --------------------------------------------------------------------------
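Version 2 of the store object info file appends the unused root-directory entries and, only when that list is non-empty, the deletion deadline; the Deserialize hunk further down mirrors the same order and the same condition. Isolated as a helper for illustration (Archive, box_time_t and the member types are the ones used in the diff):

    // Sketch: the fields added in STOREOBJECTINFO_VERSION 2, write side.
    #include <stdint.h>
    #include <string>
    #include <utility>
    #include <vector>

    void WriteUnusedRootDirEntries(Archive &rArchive,
        const std::vector<std::pair<int64_t, std::string> > &rEntries,
        box_time_t DeleteAfter)
    {
        int64_t count = rEntries.size();
        rArchive.Write(count);

        for(int64_t v = 0; v < count; v++)
        {
            rArchive.Write(rEntries[v].first);   // object ID
            rArchive.Write(rEntries[v].second);  // clear-text name
        }

        // The deadline is only present when there is something to delete,
        // so the reader must apply exactly the same condition.
        if(count > 0)
        {
            rArchive.Write(DeleteAfter);
        }
    }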
@@ -2300,7 +2909,7 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
std::string StoreObjectInfoFile =
GetConfiguration().GetKeyValue("StoreObjectInfoFile");
- if (StoreObjectInfoFile.size() <= 0)
+ if(StoreObjectInfoFile.size() <= 0)
{
return false;
}
@@ -2316,12 +2925,12 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
int iMagicValue = 0;
anArchive.Read(iMagicValue);
- if (iMagicValue != STOREOBJECTINFO_MAGIC_ID_VALUE)
+ if(iMagicValue != STOREOBJECTINFO_MAGIC_ID_VALUE)
{
- ::syslog(LOG_WARNING, "Store object info file '%s' "
+ BOX_WARNING("Store object info file "
"is not a valid or compatible serialised "
- "archive. Will re-cache from store.",
- StoreObjectInfoFile.c_str());
+ "archive. Will re-cache from store. "
+ "(" << StoreObjectInfoFile << ")");
return false;
}
@@ -2331,12 +2940,12 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
std::string strMagicValue;
anArchive.Read(strMagicValue);
- if (strMagicValue != STOREOBJECTINFO_MAGIC_ID_STRING)
+ if(strMagicValue != STOREOBJECTINFO_MAGIC_ID_STRING)
{
- ::syslog(LOG_WARNING, "Store object info file '%s' "
+ BOX_WARNING("Store object info file "
"is not a valid or compatible serialised "
- "archive. Will re-cache from store.",
- StoreObjectInfoFile.c_str());
+ "archive. Will re-cache from store. "
+ "(" << StoreObjectInfoFile << ")");
return false;
}
@@ -2347,13 +2956,12 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
int iVersion = 0;
anArchive.Read(iVersion);
- if (iVersion != STOREOBJECTINFO_VERSION)
+ if(iVersion != STOREOBJECTINFO_VERSION)
{
- ::syslog(LOG_WARNING, "Store object info file '%s' "
- "version %d unsupported. "
- "Will re-cache from store.",
- StoreObjectInfoFile.c_str(),
- iVersion);
+ BOX_WARNING("Store object info file "
+ "version " << iVersion << " unsupported. "
+ "Will re-cache from store. "
+ "(" << StoreObjectInfoFile << ")");
return false;
}
@@ -2364,11 +2972,11 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
box_time_t lastKnownConfigModTime;
anArchive.Read(lastKnownConfigModTime);
- if (lastKnownConfigModTime != GetLoadedConfigModifiedTime())
+ if(lastKnownConfigModTime != GetLoadedConfigModifiedTime())
{
- ::syslog(LOG_WARNING, "Store object info file '%s' "
- "out of date. Will re-cache from store",
- StoreObjectInfoFile.c_str());
+ BOX_WARNING("Store object info file "
+ "out of date. Will re-cache from store. "
+ "(" << StoreObjectInfoFile << ")");
return false;
}
@@ -2385,11 +2993,13 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
int64_t iCount = 0;
anArchive.Read(iCount);
- for (int v = 0; v < iCount; v++)
+ for(int v = 0; v < iCount; v++)
{
Location* pLocation = new Location;
- if (!pLocation)
+ if(!pLocation)
+ {
throw std::bad_alloc();
+ }
pLocation->Deserialize(anArchive);
mLocations.push_back(pLocation);
@@ -2401,7 +3011,7 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
iCount = 0;
anArchive.Read(iCount);
- for (int v = 0; v < iCount; v++)
+ for(int v = 0; v < iCount; v++)
{
std::string strItem;
anArchive.Read(strItem);
@@ -2412,28 +3022,53 @@ bool BackupDaemon::DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_
//
//
//
- aFile.Close();
- ::syslog(LOG_INFO, "Loaded store object info file '%s', "
- "version [%d]", StoreObjectInfoFile.c_str(),
- iVersion);
+ iCount = 0;
+ anArchive.Read(iCount);
+
+ for(int v = 0; v < iCount; v++)
+ {
+ int64_t anId;
+ anArchive.Read(anId);
+ std::string aName;
+ anArchive.Read(aName);
+
+ mUnusedRootDirEntries.push_back(std::pair<int64_t, std::string>(anId, aName));
+ }
+
+ if (iCount > 0)
+ anArchive.Read(mDeleteUnusedRootDirEntriesAfter);
+
+ //
+ //
+ //
+ aFile.Close();
+ BOX_INFO("Loaded store object info file version " << iVersion
+ << " (" << StoreObjectInfoFile << ")");
+
return true;
+ }
+ catch(std::exception &e)
+ {
+ BOX_ERROR("Internal error reading store object info file: "
+ << StoreObjectInfoFile << ": " << e.what());
}
- catch (...)
+ catch(...)
{
- DeleteAllLocations();
+ BOX_ERROR("Internal error reading store object info file: "
+ << StoreObjectInfoFile << ": unknown error");
+ }
- aClientStoreMarker =
- BackupClientContext::ClientStoreMarker_NotKnown;
- theLastSyncTime = 0;
- theNextSyncTime = 0;
+ DeleteAllLocations();
- ::syslog(LOG_WARNING, "Requested store object info file '%s' "
- "does not exist, not accessible, or inconsistent. "
- "Will re-cache from store.",
- StoreObjectInfoFile.c_str());
- }
+ aClientStoreMarker = BackupClientContext::ClientStoreMarker_NotKnown;
+ theLastSyncTime = 0;
+ theNextSyncTime = 0;
+ BOX_WARNING("Store object info file is missing, not accessible, "
+ "or inconsistent. Will re-cache from store. "
+ "(" << StoreObjectInfoFile << ")");
+
return false;
}
@@ -2455,14 +3090,24 @@ bool BackupDaemon::DeleteStoreObjectInfo() const
return false;
}
- std::string StoreObjectInfoFile =
- GetConfiguration().GetKeyValue("StoreObjectInfoFile");
+ std::string storeObjectInfoFile(GetConfiguration().GetKeyValue("StoreObjectInfoFile"));
+
+ // Check to see if the file exists
+ if(!FileExists(storeObjectInfoFile.c_str()))
+ {
+ // File doesn't exist -- so can't be deleted. But something isn't quite right, so log a message
+ BOX_WARNING("Store object info file did not exist when it "
+ "was supposed to. (" << storeObjectInfoFile << ")");
+
+ // Return true to stop things going around in a loop
+ return true;
+ }
- if (::unlink(StoreObjectInfoFile.c_str()) != 0)
+ // Actually delete it
+ if(::unlink(storeObjectInfoFile.c_str()) != 0)
{
- ::syslog(LOG_ERR, "Failed to delete the old "
- "store object info file '%s': %s",
- StoreObjectInfoFile.c_str(), strerror(errno));
+ BOX_ERROR("Failed to delete the old store object info file: "
+ << storeObjectInfoFile << ": "<< strerror(errno));
return false;
}
diff --git a/bin/bbackupd/BackupDaemon.h b/bin/bbackupd/BackupDaemon.h
index 3b63ae3d..e2d7846f 100644
--- a/bin/bbackupd/BackupDaemon.h
+++ b/bin/bbackupd/BackupDaemon.h
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -54,9 +54,13 @@
#include "BoxTime.h"
#include "Daemon.h"
+#include "BackupClientDirectoryRecord.h"
#include "Socket.h"
#include "SocketListen.h"
#include "SocketStream.h"
+#include "Logging.h"
+#include "autogen_BackupProtocolClient.h"
+
#ifdef WIN32
#include "WinNamedPipeStream.h"
#endif
@@ -77,23 +81,34 @@ class Archive;
// Created: 2003/10/08
//
// --------------------------------------------------------------------------
-class BackupDaemon : public Daemon
+class BackupDaemon : public Daemon, ProgressNotifier
{
public:
BackupDaemon();
~BackupDaemon();
private:
- // methods below do partial (specialized) serialization of client state only
- void SerializeStoreObjectInfo(int64_t aClientStoreMarker, box_time_t theLastSyncTime, box_time_t theNextSyncTime) const;
- bool DeserializeStoreObjectInfo(int64_t & aClientStoreMarker, box_time_t & theLastSyncTime, box_time_t & theNextSyncTime);
+ // methods below do partial (specialized) serialization of
+ // client state only
+ bool SerializeStoreObjectInfo(int64_t aClientStoreMarker,
+ box_time_t theLastSyncTime, box_time_t theNextSyncTime) const;
+ bool DeserializeStoreObjectInfo(int64_t & aClientStoreMarker,
+ box_time_t & theLastSyncTime, box_time_t & theNextSyncTime);
bool DeleteStoreObjectInfo() const;
BackupDaemon(const BackupDaemon &);
+
public:
+ #ifdef WIN32
+ // add command-line options to handle Windows services
+ std::string GetOptionString();
+ int ProcessOption(signed int option);
+ int Main(const std::string &rConfigFileName);
+ #endif
void Run();
virtual const char *DaemonName() const;
- virtual const char *DaemonBanner() const;
+ virtual std::string DaemonBanner() const;
+ virtual void Usage();
const ConfigurationVerify *GetConfigVerify() const;
bool FindLocationPathName(const std::string &rLocationName, std::string &rPathOut) const;
@@ -114,8 +129,11 @@ public:
enum
{
NotifyEvent_StoreFull = 0,
- NotifyEvent_ReadError = 1,
- NotifyEvent__MAX = 1
+ NotifyEvent_ReadError,
+ NotifyEvent_BackupError,
+ NotifyEvent_BackupStart,
+ NotifyEvent_BackupFinish,
+ NotifyEvent__MAX
// When adding notifications, remember to add strings to NotifySysadmin()
};
void NotifySysadmin(int Event);
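Leaving the new enumerators unnumbered means NotifyEvent__MAX always equals the count of real events, which is why a later hunk in this file can size mNotificationsSent as [NotifyEvent__MAX] rather than [NotifyEvent__MAX + 1], and why NotifySysadmin() in the .cpp diff rejects Event >= NotifyEvent__MAX. In miniature:

    // Sketch of the counting idiom used by the enum above.
    enum
    {
        NotifyEvent_StoreFull = 0,
        NotifyEvent_ReadError,     // 1
        NotifyEvent_BackupError,   // 2
        NotifyEvent_BackupStart,   // 3
        NotifyEvent_BackupFinish,  // 4
        NotifyEvent__MAX           // 5 == number of valid events
    };

    bool notificationsSent[NotifyEvent__MAX]; // valid indices 0 .. __MAX-1
    // Range check used by NotifySysadmin():  0 <= Event < NotifyEvent__MAX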
@@ -186,6 +204,8 @@ private:
std::vector<BackupClientInodeToIDMap *> mCurrentIDMaps;
std::vector<BackupClientInodeToIDMap *> mNewIDMaps;
+ int mDeleteRedundantLocationsAfter;
+
// For the command socket
class CommandSocketInfo
{
@@ -209,18 +229,246 @@ private:
CommandSocketInfo *mpCommandSocketInfo;
// Stop notifications being repeated.
- bool mNotificationsSent[NotifyEvent__MAX + 1];
+ bool mNotificationsSent[NotifyEvent__MAX];
// Unused entries in the root directory wait a while before being deleted
box_time_t mDeleteUnusedRootDirEntriesAfter; // time to delete them
std::vector<std::pair<int64_t,std::string> > mUnusedRootDirEntries;
+public:
+ bool StopRun() { return this->Daemon::StopRun(); }
+
+private:
+ bool mLogAllFileAccess;
+
+ /* ProgressNotifier implementation */
+public:
+ virtual void NotifyScanDirectory(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_INFO("Scanning directory: " << rLocalPath);
+ }
+ }
+ virtual void NotifyDirStatFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg)
+ {
+ BOX_WARNING("Failed to access directory: " << rLocalPath
+ << ": " << rErrorMsg);
+ }
+ virtual void NotifyFileStatFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg)
+ {
+ BOX_WARNING("Failed to access file: " << rLocalPath
+ << ": " << rErrorMsg);
+ }
+ virtual void NotifyDirListFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg)
+ {
+ BOX_WARNING("Failed to list directory: " << rLocalPath
+ << ": " << rErrorMsg);
+ }
+ virtual void NotifyMountPointSkipped(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ #ifdef WIN32
+ BOX_WARNING("Ignored directory: " << rLocalPath <<
+ ": is an NTFS junction/reparse point; create "
+ "a new location if you want to back it up");
+ #else
+ BOX_WARNING("Ignored directory: " << rLocalPath <<
+ ": is a mount point; create a new location "
+ "if you want to back it up");
+ #endif
+ }
+ virtual void NotifyFileExcluded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_INFO("Skipping excluded file: " << rLocalPath);
+ }
+ }
+ virtual void NotifyDirExcluded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_INFO("Skipping excluded directory: " << rLocalPath);
+ }
+ }
+ virtual void NotifyUnsupportedFileType(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ BOX_WARNING("Ignoring file of unknown type: " << rLocalPath);
+ }
+ virtual void NotifyFileReadFailed(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const std::string& rErrorMsg)
+ {
+ BOX_WARNING("Error reading file: " << rLocalPath
+ << ": " << rErrorMsg);
+ }
+ virtual void NotifyFileModifiedInFuture(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ BOX_WARNING("Some files have modification times excessively "
+ "in the future. Check clock synchronisation. "
+ "Example file (only one shown): " << rLocalPath);
+ }
+ virtual void NotifyFileSkippedServerFull(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ BOX_WARNING("Skipped file: server is full: " << rLocalPath);
+ }
+ virtual void NotifyFileUploadException(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ const BoxException& rException)
+ {
+ if (rException.GetType() == CommonException::ExceptionType &&
+ rException.GetSubType() == CommonException::AccessDenied)
+ {
+ BOX_ERROR("Failed to upload file: " << rLocalPath
+ << ": Access denied");
+ }
+ else
+ {
+ BOX_ERROR("Failed to upload file: " << rLocalPath
+ << ": caught exception: " << rException.what()
+ << " (" << rException.GetType()
+ << "/" << rException.GetSubType() << ")");
+ }
+ }
+ virtual void NotifyFileUploadServerError(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int type, int subtype)
+ {
+ std::ostringstream msgs;
+ if (type != BackupProtocolClientError::ErrorType)
+ {
+ msgs << "unknown error type " << type;
+ }
+ else
+ {
+ switch(subtype)
+ {
+ case BackupProtocolClientError::Err_WrongVersion:
+ msgs << "WrongVersion";
+ break;
+ case BackupProtocolClientError::Err_NotInRightProtocolPhase:
+ msgs << "NotInRightProtocolPhase";
+ break;
+ case BackupProtocolClientError::Err_BadLogin:
+ msgs << "BadLogin";
+ break;
+ case BackupProtocolClientError::Err_CannotLockStoreForWriting:
+ msgs << "CannotLockStoreForWriting";
+ break;
+ case BackupProtocolClientError::Err_SessionReadOnly:
+ msgs << "SessionReadOnly";
+ break;
+ case BackupProtocolClientError::Err_FileDoesNotVerify:
+ msgs << "FileDoesNotVerify";
+ break;
+ case BackupProtocolClientError::Err_DoesNotExist:
+ msgs << "DoesNotExist";
+ break;
+ case BackupProtocolClientError::Err_DirectoryAlreadyExists:
+ msgs << "DirectoryAlreadyExists";
+ break;
+ case BackupProtocolClientError::Err_CannotDeleteRoot:
+ msgs << "CannotDeleteRoot";
+ break;
+ case BackupProtocolClientError::Err_TargetNameExists:
+ msgs << "TargetNameExists";
+ break;
+ case BackupProtocolClientError::Err_StorageLimitExceeded:
+ msgs << "StorageLimitExceeded";
+ break;
+ case BackupProtocolClientError::Err_DiffFromFileDoesNotExist:
+ msgs << "DiffFromFileDoesNotExist";
+ break;
+ case BackupProtocolClientError::Err_DoesNotExistInDirectory:
+ msgs << "DoesNotExistInDirectory";
+ break;
+ case BackupProtocolClientError::Err_PatchConsistencyError:
+ msgs << "PatchConsistencyError";
+ break;
+ default:
+ msgs << "unknown error subtype " << subtype;
+ }
+ }
+
+ BOX_ERROR("Failed to upload file: " << rLocalPath
+ << ": server error: " << msgs.str());
+ }
+ virtual void NotifyFileUploading(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_INFO("Uploading complete file: " << rLocalPath);
+ }
+ }
+ virtual void NotifyFileUploadingPatch(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_INFO("Uploading patch to file: " << rLocalPath);
+ }
+ }
+ virtual void NotifyFileUploaded(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int64_t FileSize)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_INFO("Uploaded file: " << rLocalPath);
+ }
+ }
+ virtual void NotifyFileSynchronised(
+ const BackupClientDirectoryRecord* pDirRecord,
+ const std::string& rLocalPath,
+ int64_t FileSize)
+ {
+ if (mLogAllFileAccess)
+ {
+ BOX_INFO("Synchronised file: " << rLocalPath);
+ }
+ }
+
#ifdef WIN32
public:
void RunHelperThread(void);
private:
- bool mDoSyncFlagOut, mSyncIsForcedOut, mReceivedCommandConn;
+ bool mDoSyncFlagOut, mSyncIsForcedOut;
+ bool mInstallService, mRemoveService, mRunAsService;
+ std::string mServiceName;
+ HANDLE mhMessageToSendEvent, mhCommandReceivedEvent;
+ CRITICAL_SECTION mMessageQueueLock;
+ std::vector<std::string> mMessageList;
#endif
};
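The block of virtual Notify...() methods above is BackupDaemon's inline implementation of the new ProgressNotifier-style callback interface: every per-file event goes through BOX_INFO/BOX_WARNING/BOX_ERROR, with the high-volume ones gated on the new mLogAllFileAccess flag. Another consumer of the same callbacks could aggregate statistics instead of logging; the following sketch assumes only the signatures shown above and is not part of the changeset:

#include <stdint.h>
#include <string>

class BackupClientDirectoryRecord;	// opaque here

// Hypothetical notifier that counts events rather than logging them.
// Only two callbacks are shown; the others would follow the same pattern
// (the real base interface is declared elsewhere, so it is left out here).
class CountingProgressNotifier
{
public:
	CountingProgressNotifier()
	: mFilesUploaded(0), mBytesUploaded(0), mReadErrors(0)
	{ }

	virtual void NotifyFileUploaded(
		const BackupClientDirectoryRecord* pDirRecord,
		const std::string& rLocalPath,
		int64_t FileSize)
	{
		mFilesUploaded++;
		mBytesUploaded += FileSize;
	}

	virtual void NotifyFileReadFailed(
		const BackupClientDirectoryRecord* pDirRecord,
		const std::string& rLocalPath,
		const std::string& rErrorMsg)
	{
		mReadErrors++;
	}

	int64_t GetFilesUploaded() const { return mFilesUploaded; }
	int64_t GetBytesUploaded() const { return mBytesUploaded; }
	int64_t GetReadErrors()    const { return mReadErrors; }

private:
	int64_t mFilesUploaded, mBytesUploaded, mReadErrors;
};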
diff --git a/bin/bbackupd/ClientException.txt b/bin/bbackupd/ClientException.txt
new file mode 100644
index 00000000..04f88620
--- /dev/null
+++ b/bin/bbackupd/ClientException.txt
@@ -0,0 +1,11 @@
+
+# NOTE: Exception descriptions are for public distributions of Box Backup only -- do not rely on them for other applications.
+
+
+EXCEPTION Client 13
+
+Internal 0
+AssertFailed 1
+ClockWentBackwards 2 Invalid (negative) sync period: perhaps your clock is going backwards?
+FailedToDeleteStoreObjectInfoFile 3 Failed to delete the StoreObjectInfoFile, backup cannot continue safely.
+CorruptStoreObjectInfoFile 4 The store object info file contained an invalid value and is probably corrupt. Try deleting it.
diff --git a/bin/bbackupd/Makefile.extra b/bin/bbackupd/Makefile.extra
new file mode 100644
index 00000000..52e21366
--- /dev/null
+++ b/bin/bbackupd/Makefile.extra
@@ -0,0 +1,7 @@
+
+MAKEEXCEPTION = ../../lib/common/makeexception.pl
+
+# AUTOGEN SEEDING
+autogen_ClientException.h autogen_ClientException.cpp: $(MAKEEXCEPTION) ClientException.txt
+ $(PERL) $(MAKEEXCEPTION) ClientException.txt
+
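ClientException.txt and the Makefile.extra rule above feed makeexception.pl, which generates autogen_ClientException.h/.cpp in the same way as the project's other exception classes. Assuming the generated header follows that convention, the new sub-codes are raised through the usual THROW_EXCEPTION macro, for example:

#include "Box.h"

#include <stdint.h>

#include "autogen_ClientException.h"

#include "MemLeakFindOn.h"

// Illustrative only: reject a negative sync period using the
// ClockWentBackwards code defined in ClientException.txt above.
static void CheckSyncPeriod(int64_t SyncPeriodSeconds)
{
	if(SyncPeriodSeconds < 0)
	{
		// "Invalid (negative) sync period: perhaps your clock is
		// going backwards?"
		THROW_EXCEPTION(ClientException, ClockWentBackwards)
	}
}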
diff --git a/bin/bbackupd/Win32BackupService.cpp b/bin/bbackupd/Win32BackupService.cpp
index 96f2b057..d275c891 100644
--- a/bin/bbackupd/Win32BackupService.cpp
+++ b/bin/bbackupd/Win32BackupService.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -50,40 +50,37 @@
#include "Win32BackupService.h"
-Win32BackupService gDaemonService;
+Win32BackupService* gpDaemonService = NULL;
extern HANDLE gStopServiceEvent;
+extern DWORD gServiceReturnCode;
unsigned int WINAPI RunService(LPVOID lpParameter)
{
- DWORD retVal = gDaemonService.WinService();
- SetEvent( gStopServiceEvent );
+ DWORD retVal = gpDaemonService->WinService((const char*) lpParameter);
+ gServiceReturnCode = retVal;
+ SetEvent(gStopServiceEvent);
return retVal;
}
void TerminateService(void)
{
- gDaemonService.SetTerminateWanted();
+ gpDaemonService->SetTerminateWanted();
}
-DWORD Win32BackupService::WinService(void)
+DWORD Win32BackupService::WinService(const char* pConfigFileName)
{
- int argc = 2;
- //first off get the path name for the default
- char buf[MAX_PATH];
-
- GetModuleFileName(NULL, buf, sizeof(buf));
- std::string buffer(buf);
- std::string conf( "-c");
- std::string cfile(buffer.substr(0,(buffer.find("bbackupd.exe")))
- + "bbackupd.conf");
+ DWORD ret;
- const char *argv[] = {conf.c_str(), cfile.c_str()};
+ if (pConfigFileName != NULL)
+ {
+ ret = this->Main(pConfigFileName);
+ }
+ else
+ {
+ ret = this->Main(BOX_GET_DEFAULT_BBACKUPD_CONFIG_FILE);
+ }
- MAINHELPER_START
-
- return this->Main(BOX_FILE_BBACKUPD_DEFAULT_CONFIG, argc, argv);
-
- MAINHELPER_END
+ return ret;
}
#endif // WIN32
diff --git a/bin/bbackupd/Win32BackupService.h b/bin/bbackupd/Win32BackupService.h
index 980e5d6c..e71c93c5 100644
--- a/bin/bbackupd/Win32BackupService.h
+++ b/bin/bbackupd/Win32BackupService.h
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -50,7 +50,7 @@ class BackupDaemon;
class Win32BackupService : public BackupDaemon
{
public:
- DWORD WinService(void);
+ DWORD WinService(const char* pConfigFileName);
};
#endif // WIN32
diff --git a/bin/bbackupd/Win32ServiceFunctions.cpp b/bin/bbackupd/Win32ServiceFunctions.cpp
index 07791af3..276d004f 100644
--- a/bin/bbackupd/Win32ServiceFunctions.cpp
+++ b/bin/bbackupd/Win32ServiceFunctions.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -68,6 +68,7 @@ TCHAR* gServiceName = TEXT("Box Backup Service");
SERVICE_STATUS gServiceStatus;
SERVICE_STATUS_HANDLE gServiceStatusHandle = 0;
HANDLE gStopServiceEvent = 0;
+DWORD gServiceReturnCode = 0;
#define SERVICE_NAME "boxbackup"
@@ -81,8 +82,9 @@ void ErrorHandler(char *s, DWORD err)
{
char buf[256];
memset(buf, 0, sizeof(buf));
- _snprintf(buf, sizeof(buf)-1, "%s (%d)", s, err);
- ::syslog(LOG_ERR, "%s", buf);
+ _snprintf(buf, sizeof(buf)-1, "%s: %s", s,
+ GetErrorMessage(err).c_str());
+ BOX_ERROR(buf);
MessageBox(0, buf, "Error",
MB_OK | MB_SETFOREGROUND | MB_DEFAULT_DESKTOP_ONLY);
ExitProcess(err);
@@ -131,28 +133,30 @@ void WINAPI ServiceControlHandler( DWORD controlCode )
// It also returns on any error because the
// service cannot start if there is an error.
+static char* spConfigFileName;
+
VOID ServiceMain(DWORD argc, LPTSTR *argv)
{
- // initialise service status
- gServiceStatus.dwServiceType = SERVICE_WIN32;
- gServiceStatus.dwCurrentState = SERVICE_STOPPED;
- gServiceStatus.dwControlsAccepted = 0;
- gServiceStatus.dwWin32ExitCode = NO_ERROR;
- gServiceStatus.dwServiceSpecificExitCode = NO_ERROR;
- gServiceStatus.dwCheckPoint = 0;
- gServiceStatus.dwWaitHint = 0;
-
- gServiceStatusHandle = RegisterServiceCtrlHandler(gServiceName,
- ServiceControlHandler);
-
- if (gServiceStatusHandle)
- {
- // service is starting
- gServiceStatus.dwCurrentState = SERVICE_START_PENDING;
- SetServiceStatus(gServiceStatusHandle, &gServiceStatus);
-
- // do initialisation here
- gStopServiceEvent = CreateEvent( 0, TRUE, FALSE, 0 );
+ // initialise service status
+ gServiceStatus.dwServiceType = SERVICE_WIN32;
+ gServiceStatus.dwCurrentState = SERVICE_STOPPED;
+ gServiceStatus.dwControlsAccepted = 0;
+ gServiceStatus.dwWin32ExitCode = NO_ERROR;
+ gServiceStatus.dwServiceSpecificExitCode = NO_ERROR;
+ gServiceStatus.dwCheckPoint = 0;
+ gServiceStatus.dwWaitHint = 0;
+
+ gServiceStatusHandle = RegisterServiceCtrlHandler(gServiceName,
+ ServiceControlHandler);
+
+ if (gServiceStatusHandle)
+ {
+ // service is starting
+ gServiceStatus.dwCurrentState = SERVICE_START_PENDING;
+ SetServiceStatus(gServiceStatusHandle, &gServiceStatus);
+
+ // do initialisation here
+ gStopServiceEvent = CreateEvent(0, TRUE, FALSE, 0);
if (!gStopServiceEvent)
{
gServiceStatus.dwControlsAccepted &=
@@ -167,7 +171,7 @@ VOID ServiceMain(DWORD argc, LPTSTR *argv)
NULL,
0,
RunService,
- 0,
+ spConfigFileName,
CREATE_SUSPENDED,
NULL);
@@ -176,7 +180,7 @@ VOID ServiceMain(DWORD argc, LPTSTR *argv)
// we are now running so tell the SCM
gServiceStatus.dwControlsAccepted |=
- (SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN);
+ (SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN);
gServiceStatus.dwCurrentState = SERVICE_RUNNING;
SetServiceStatus(gServiceStatusHandle, &gServiceStatus);
@@ -192,13 +196,23 @@ VOID ServiceMain(DWORD argc, LPTSTR *argv)
// service is now stopped
gServiceStatus.dwControlsAccepted &=
~(SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN);
+
gServiceStatus.dwCurrentState = SERVICE_STOPPED;
+
+ if (gServiceReturnCode != 0)
+ {
+ gServiceStatus.dwWin32ExitCode = ERROR_SERVICE_SPECIFIC_ERROR;
+ gServiceStatus.dwServiceSpecificExitCode = gServiceReturnCode;
+ }
+
SetServiceStatus(gServiceStatusHandle, &gServiceStatus);
- }
+ }
}
-void OurService(void)
+int OurService(const char* pConfigFileName)
{
+ spConfigFileName = strdup(pConfigFileName);
+
SERVICE_TABLE_ENTRY serviceTable[] =
{
{ SERVICE_NAME, (LPSERVICE_MAIN_FUNCTION) ServiceMain },
@@ -209,54 +223,119 @@ void OurService(void)
// Register with the SCM
success = StartServiceCtrlDispatcher(serviceTable);
+ free(spConfigFileName);
+ spConfigFileName = NULL;
+
if (!success)
{
ErrorHandler("Failed to start service. Did you start "
"Box Backup from the Service Control Manager? "
"(StartServiceCtrlDispatcher)", GetLastError());
+ return 1;
}
+
+ return 0;
}
-void InstallService(void)
+int InstallService(const char* pConfigFileName, const std::string& rServiceName)
{
- SC_HANDLE newService, scm;
+ if (pConfigFileName != NULL)
+ {
+ struct stat st;
- scm = OpenSCManager(0,0,SC_MANAGER_CREATE_SERVICE);
+ if (emu_stat(pConfigFileName, &st) != 0)
+ {
+ BOX_ERROR("Failed to open configuration file '" <<
+ pConfigFileName << "': " << strerror(errno));
+ return 1;
+ }
+
+ if (!(st.st_mode & S_IFREG))
+ {
+
+ BOX_ERROR("Failed to open configuration file '" <<
+ pConfigFileName << "': not a file");
+ return 1;
+ }
+ }
+
+ SC_HANDLE scm = OpenSCManager(0,0,SC_MANAGER_CREATE_SERVICE);
if (!scm)
{
- syslog(LOG_ERR, "Failed to open service control manager: "
- "error %d", GetLastError());
- return;
+ BOX_ERROR("Failed to open service control manager: " <<
+ GetErrorMessage(GetLastError()));
+ return 1;
}
char cmd[MAX_PATH];
GetModuleFileName(NULL, cmd, sizeof(cmd)-1);
cmd[sizeof(cmd)-1] = 0;
- char cmd_args[MAX_PATH];
- _snprintf(cmd_args, sizeof(cmd_args)-1, "%s --service", cmd);
- cmd_args[sizeof(cmd_args)-1] = 0;
+ std::string cmdWithArgs(cmd);
+ cmdWithArgs += " -s -S \"" + rServiceName + "\"";
- newService = CreateService(
+ if (pConfigFileName != NULL)
+ {
+ cmdWithArgs += " \"";
+ cmdWithArgs += pConfigFileName;
+ cmdWithArgs += "\"";
+ }
+
+ std::string serviceDesc = "Box Backup (" + rServiceName + ")";
+
+ SC_HANDLE newService = CreateService(
scm,
- SERVICE_NAME,
- "Box Backup",
+ rServiceName.c_str(),
+ serviceDesc.c_str(),
SERVICE_ALL_ACCESS,
SERVICE_WIN32_OWN_PROCESS,
SERVICE_AUTO_START,
SERVICE_ERROR_NORMAL,
- cmd_args,
+ cmdWithArgs.c_str(),
0,0,0,0,0);
+ DWORD err = GetLastError();
+ CloseServiceHandle(scm);
+
if (!newService)
{
- ::syslog(LOG_ERR, "Failed to create Box Backup service: "
- "error %d", GetLastError());
- return;
+ switch (err)
+ {
+ case ERROR_SERVICE_EXISTS:
+ {
+ BOX_ERROR("Failed to create Box Backup "
+ "service: it already exists");
+ }
+ break;
+
+ case ERROR_SERVICE_MARKED_FOR_DELETE:
+ {
+ BOX_ERROR("Failed to create Box Backup "
+ "service: it is waiting to be deleted");
+ }
+ break;
+
+ case ERROR_DUPLICATE_SERVICE_NAME:
+ {
+ BOX_ERROR("Failed to create Box Backup "
+ "service: a service with this name "
+ "already exists");
+ }
+ break;
+
+ default:
+ {
+ BOX_ERROR("Failed to create Box Backup "
+ "service: error " <<
+ GetErrorMessage(GetLastError()));
+ }
+ }
+
+ return 1;
}
- ::syslog(LOG_INFO, "Created Box Backup service");
+ BOX_INFO("Created Box Backup service");
SERVICE_DESCRIPTION desc;
desc.lpDescription = "Backs up your data files over the Internet";
@@ -264,50 +343,80 @@ void InstallService(void)
if (!ChangeServiceConfig2(newService, SERVICE_CONFIG_DESCRIPTION,
&desc))
{
- ::syslog(LOG_WARNING, "Failed to set description for "
- "Box Backup service: error %d", GetLastError());
+ BOX_WARNING("Failed to set description for Box Backup "
+ "service: " << GetErrorMessage(GetLastError()));
}
CloseServiceHandle(newService);
- CloseServiceHandle(scm);
+
+ return 0;
}
-void RemoveService(void)
+int RemoveService(const std::string& rServiceName)
{
- SC_HANDLE service, scm;
- SERVICE_STATUS status;
-
- scm = OpenSCManager(0,0,SC_MANAGER_CREATE_SERVICE);
+ SC_HANDLE scm = OpenSCManager(0,0,SC_MANAGER_CREATE_SERVICE);
if (!scm)
{
- syslog(LOG_ERR, "Failed to open service control manager: "
- "error %d", GetLastError());
- return;
+ BOX_ERROR("Failed to open service control manager: " <<
+ GetErrorMessage(GetLastError()));
+ return 1;
}
- service = OpenService(scm, SERVICE_NAME, SERVICE_ALL_ACCESS|DELETE);
- ControlService(service, SERVICE_CONTROL_STOP, &status);
+ SC_HANDLE service = OpenService(scm, rServiceName.c_str(),
+ SERVICE_ALL_ACCESS|DELETE);
+ DWORD err = GetLastError();
+ CloseServiceHandle(scm);
if (!service)
{
- syslog(LOG_ERR, "Failed to open Box Backup service: "
- "error %d", GetLastError());
- return;
+ if (err == ERROR_SERVICE_DOES_NOT_EXIST ||
+ err == ERROR_IO_PENDING)
+ // hello microsoft? anyone home?
+ {
+ BOX_ERROR("Failed to open Box Backup service: "
+ "not installed or not found");
+ }
+ else
+ {
+ BOX_ERROR("Failed to open Box Backup service: " <<
+ GetErrorMessage(err));
+ }
+ return 1;
+ }
+
+ SERVICE_STATUS status;
+ if (!ControlService(service, SERVICE_CONTROL_STOP, &status))
+ {
+ err = GetLastError();
+ if (err != ERROR_SERVICE_NOT_ACTIVE)
+ {
+ BOX_WARNING("Failed to stop Box Backup service: " <<
+ GetErrorMessage(err));
+ }
}
- if (DeleteService(service))
+ BOOL deleted = DeleteService(service);
+ err = GetLastError();
+ CloseServiceHandle(service);
+
+ if (deleted)
{
- syslog(LOG_INFO, "Box Backup service deleted");
+ BOX_INFO("Box Backup service deleted");
+ return 0;
+ }
+ else if (err == ERROR_SERVICE_MARKED_FOR_DELETE)
+ {
+ BOX_ERROR("Failed to remove Box Backup service: "
+ "it is already being deleted");
}
else
{
- syslog(LOG_ERR, "Failed to remove Box Backup service: "
- "error %d", GetLastError());
+ BOX_ERROR("Failed to remove Box Backup service: " <<
+ GetErrorMessage(err));
}
- CloseServiceHandle(service);
- CloseServiceHandle(scm);
+ return 1;
}
#endif // WIN32
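ServiceMain() above now propagates a non-zero daemon exit code to the Service Control Manager as ERROR_SERVICE_SPECIFIC_ERROR, with dwServiceSpecificExitCode taken from gServiceReturnCode. That value can be read back through the standard service status API; a minimal sketch (plain Win32, not part of this changeset):

#ifdef WIN32
#include <windows.h>
#include <stdio.h>

// Print the exit code that the Box Backup service reported when it last
// stopped. Returns 0 on success, 1 on any API failure.
int PrintServiceExitCode(const char* pServiceName)
{
	SC_HANDLE scm = OpenSCManager(0, 0, SC_MANAGER_CONNECT);
	if (!scm) return 1;

	SC_HANDLE service = OpenService(scm, pServiceName, SERVICE_QUERY_STATUS);
	if (!service)
	{
		CloseServiceHandle(scm);
		return 1;
	}

	SERVICE_STATUS status;
	int ret = 1;
	if (QueryServiceStatus(service, &status))
	{
		if (status.dwWin32ExitCode == ERROR_SERVICE_SPECIFIC_ERROR)
		{
			// The value ServiceMain() copied from gServiceReturnCode.
			printf("service-specific exit code: %lu\n",
				status.dwServiceSpecificExitCode);
		}
		else
		{
			printf("win32 exit code: %lu\n", status.dwWin32ExitCode);
		}
		ret = 0;
	}

	CloseServiceHandle(service);
	CloseServiceHandle(scm);
	return ret;
}
#endif // WIN32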
diff --git a/bin/bbackupd/Win32ServiceFunctions.h b/bin/bbackupd/Win32ServiceFunctions.h
index 98856ca1..f85929b8 100644
--- a/bin/bbackupd/Win32ServiceFunctions.h
+++ b/bin/bbackupd/Win32ServiceFunctions.h
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -50,8 +50,8 @@
#ifndef WIN32SERVICEFUNCTIONS_H
#define WIN32SERVICEFUNCTIONS_H
-void RemoveService(void);
-void InstallService(void);
-void OurService(void);
+int RemoveService (const std::string& rServiceName);
+int InstallService (const char* pConfigFilePath, const std::string& rServiceName);
+int OurService (const char* pConfigFileName);
#endif
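The service entry points now return an int and take the service name explicitly, and InstallService() and OurService() also take the configuration file path. Callers are expected to check those return values; a rough sketch of command dispatch using the new signatures (the real option parsing lives in the daemon and is not shown in this diff):

#ifdef WIN32
#include <string>

#include "Win32ServiceFunctions.h"

// Illustrative dispatcher only; the argument handling in bbackupd itself
// is more involved (see the mInstallService/mRemoveService/mRunAsService
// members added to BackupDaemon above).
int DispatchServiceCommand(const std::string& rCommand,
	const std::string& rServiceName, const char* pConfigFileName)
{
	if (rCommand == "install")
	{
		// Registers the service to run "bbackupd.exe -s -S <name> [config]".
		return InstallService(pConfigFileName, rServiceName);
	}
	else if (rCommand == "remove")
	{
		return RemoveService(rServiceName);
	}
	else if (rCommand == "run")
	{
		// Blocks inside StartServiceCtrlDispatcher() until the service stops.
		return OurService(pConfigFileName);
	}
	return 2; // unknown command
}
#endif // WIN32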
diff --git a/bin/bbackupd/bbackupd-config b/bin/bbackupd/bbackupd-config
index fb73e8d3..880f2e97 100755
--- a/bin/bbackupd/bbackupd-config
+++ b/bin/bbackupd/bbackupd-config
@@ -1,5 +1,5 @@
#!/usr/bin/perl
-# distribution boxbackup-0.10 (svn version: 494)
+# distribution boxbackup-0.11rc1 (svn version: 2023_2024)
#
# Copyright (c) 2003 - 2006
# Ben Summers and contributors. All rights reserved.
@@ -161,9 +161,9 @@ __E
print ' ',$_,"\n" for(@tobackup);
print <<__E;
-Note: If other file systems are mounted inside these directories, then problems may occur
-with files on the store server being renamed incorrectly. This will cause efficiency
-problems, but not affect the integrity of the backups.
+Note: If other file systems are mounted inside these directories, then
+they will NOT be backed up. You will have to create separate locations for
+any mounted filesystems inside your backup locations.
WARNING: Directories not checked against mountpoints. Check mounted filesystems manually.
@@ -249,12 +249,24 @@ $sendmail = 'sendmail' if $sendmail !~ m/\S/;
print NOTIFY <<__EOS;
#!/bin/sh
+# This script is run whenever bbackupd changes state or encounters a
+# problem which requires the system administrator to assist:
+#
+# 1) The store is full, and no more data can be uploaded.
+# 2) Some files or directories were not readable.
+# 3) A backup run starts or finishes.
+#
+# The default script emails the system administrator, except for backups
+# starting and stopping, where it does nothing.
+
SUBJECT="BACKUP PROBLEM on host $hostname"
SENDTO="$current_username"
-if [ \$1 = store-full ]
-then
-$sendmail \$SENDTO <<EOM
+if [ "\$1" = "" ]; then
+ echo "Usage: $0 <store-full|read-error|backup-error|backup-start|backup-finish>" >&2
+ exit 2
+elif [ "\$1" = store-full ]; then
+ $sendmail \$SENDTO <<EOM
Subject: \$SUBJECT (store full)
To: \$SENDTO
@@ -268,8 +280,7 @@ FILES ARE NOT BEING BACKED UP
Please adjust the limits on account $account_num on server $server.
EOM
-elif [ \$1 = read-error ]
-then
+elif [ "\$1" = read-error ]; then
$sendmail \$SENDTO <<EOM
Subject: \$SUBJECT (read errors)
To: \$SENDTO
@@ -282,11 +293,14 @@ THESE FILES ARE NOT BEING BACKED UP
===================================
Check the logs on $hostname for the files and directories which caused
-these errors, and take appropraite action.
+these errors, and take appropriate action.
Other files are being backed up.
EOM
+elif [ "\$1" = backup-start -o "\$1" = backup-finish ]; then
+ # do nothing by default
+ true
else
$sendmail \$SENDTO <<EOM
Subject: \$SUBJECT (unknown)
@@ -325,11 +339,15 @@ TrustedCAsFile = $ca_root_cert
DataDirectory = $working_dir
-# This script is run whenever bbackupd encounters a problem which requires
-# the system administrator to assist:
+# This script is run whenever bbackupd changes state or encounters a
+# problem which requires the system administrator to assist:
+#
# 1) The store is full, and no more data can be uploaded.
# 2) Some files or directories were not readable.
-# The default script emails the system administrator.
+# 3) A backup run starts or finishes.
+#
+# The default script emails the system administrator, except for backups
+# starting and stopping, where it does nothing.
NotifyScript = $notify_script
@@ -340,24 +358,46 @@ if($backup_mode eq 'lazy')
# lazy mode configuration
print CONFIG <<__E;
-# A scan of the local discs will be made once an hour (approximately).
-# To avoid cycles of load on the server, this time is randomly adjusted by a small
+# The number of seconds between backup runs under normal conditions. To avoid
+# cycles of load on the server, this time is randomly adjusted by a small
# percentage as the daemon runs.
UpdateStoreInterval = 3600
-# A file must have been modified at least 6 hours ago before it will be uploaded.
+# The minimum age of a file, in seconds, that will be uploaded. Avoids
+# repeated uploads of a file which is constantly being modified.
MinimumFileAge = 21600
-# If a file is modified repeated, it won't be uploaded immediately in case it's modified again.
-# However, it should be uploaded eventually. This is how long we should wait after first noticing
-# a change. (1 day)
+# If a file is modified repeatedly, it won't be uploaded immediately in case
+# it's modified again, due to the MinimumFileAge specified above. However, it
+# should be uploaded eventually even if it is being modified repeatedly. This
+# is how long we should wait, in seconds, after first noticing a change.
+# (86400 seconds = 1 day)
MaxUploadWait = 86400
+# If the connection is idle for some time (e.g. over 10 minutes or 600
+# seconds, not sure exactly how long) then the server will give up and
+# disconnect the client, resulting in Connection Protocol_Timeout errors
+# on the server and TLSReadFailed or TLSWriteFailed errors on the client.
+# Also, some firewalls and NAT gateways will kill idle connections after
+# similar lengths of time.
+#
+# This can happen for example when most files are backed up already and
+# don't need to be sent to the store again, while scanning a large
+# directory, or while calculating diffs of a large file. To avoid this,
+# KeepAliveTime specifies that special keep-alive messages should be sent
+# when the connection is otherwise idle for a certain length of time,
+# specified here in seconds.
+#
+# The default is that these messages are never sent, equivalent to setting
+# this option to zero, but we recommend that all users enable this.
+
+KeepAliveTime = 120
+
__E
}
else
@@ -390,13 +430,17 @@ FileTrackingSizeThreshold = 65535
DiffingUploadSizeThreshold = 8192
-# The limit on how much time is spent diffing files. Most files shouldn't take very long,
-# but if you have really big files you can use this to limit the time spent diffing them.
+# The limit on how much time is spent diffing files, in seconds. Most files
+# shouldn't take very long, but if you have really big files you can use this
+# to limit the time spent diffing them.
+#
# * Reduce if you are having problems with processor usage.
-# * Increase if you have large files, and think the upload of changes is too large and want
-# to spend more time searching for unchanged blocks.
+#
+# * Increase if you have large files, and think the upload of changes is too
+# large and you want bbackupd to spend more time searching for unchanged
+# blocks.
-MaximumDiffingTime = 20
+MaximumDiffingTime = 120
# Uncomment this line to see exactly what the daemon is doing when it's connected to the server.
@@ -404,14 +448,20 @@ MaximumDiffingTime = 20
# ExtendedLogging = yes
-# Use this to temporarily stop bbackupd from syncronising or connecting to the store.
-# This specifies a program or script script which is run just before each sync, and ideally
-# the full path to the interpreter. It will be run as the same user bbackupd is running as,
-# usually root.
-# The script prints either "now" or a number to STDOUT (and a terminating newline, no quotes).
-# If the result was "now", then the sync will happen. If it's a number, then the script will
-# be asked again in that number of seconds.
-# For example, you could use this on a laptop to only backup when on a specific network.
+# This specifies a program or script which is run just before each
+# sync, and ideally the full path to the interpreter. It will be run as the
+# same user bbackupd is running as, usually root.
+#
+# The script must output (print) either "now" or a number to STDOUT (and a
+# terminating newline, no quotes).
+#
+# If the result was "now", then the sync will happen. If it's a number, then
+# no backup will happen for that number of seconds (bbackupd will pause) and
+# then the script will be run again.
+#
+# Use this to temporarily stop bbackupd from synchronising or connecting to the
+# store. For example, you could use this on a laptop to only backup when on a
+# specific network, or when it has a working Internet connection.
# SyncAllowScript = /path/to/interpreter/or/exe script-name parameters etc
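The rewritten comment above pins down the SyncAllowScript contract: the script prints either "now" or a number of seconds, and in the latter case bbackupd waits that long before asking again. On the daemon side that amounts to logic like the following sketch (illustrative only; the real handling is in BackupDaemon.cpp, outside this excerpt):

#include <stdlib.h>
#include <string>

// Interpret one line of SyncAllowScript output, as described above.
// Returns 0 if the sync should start now, otherwise the number of seconds
// to wait before running the script again. Sketch only: the real daemon
// also deals with script failures and logging.
static int InterpretSyncAllowOutput(const std::string& rOutput)
{
	if (rOutput == "now")
	{
		return 0; // start the sync immediately
	}

	int waitSeconds = ::atoi(rOutput.c_str());
	if (waitSeconds <= 0)
	{
		waitSeconds = 1; // malformed output: retry shortly rather than spin
	}
	return waitSeconds;
}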
@@ -434,7 +484,7 @@ Server
PidFile = /var/run/bbackupd.pid
}
-#
+
# BackupLocations specifies which locations on disc should be backed up. Each
# directory is in the format
#
@@ -456,22 +506,38 @@ Server
# For example:
#
# ExcludeDir = /home/guest-user
-# ExcludeFilesRegex = *.(mp3|MP3)\$
+# ExcludeFilesRegex = \.(mp3|MP3)\$
# AlwaysIncludeFile = /home/username/veryimportant.mp3
#
# This excludes the directory /home/guest-user from the backup along with all mp3
# files, except one MP3 file in particular.
#
# In general, Exclude excludes a file or directory, unless the directory is
-# explicitly mentioned in a AlwaysInclude directive.
+# explicitly mentioned in an AlwaysInclude directive. However, Box Backup
+# does NOT scan inside excluded directories and will never back up an
+# AlwaysIncluded file or directory inside an excluded directory or any
+# subdirectory thereof.
+#
+# To back up a directory inside an excluded directory, use a configuration
+# like this, to ensure that each directory in the path to the important
+# files is included, but none of their contents will be backed up except
+# the directories further down that path to the important one.
+#
+# ExcludeDirsRegex = ^/home/user/bigfiles/
+# ExcludeFilesRegex = ^/home/user/bigfiles/
+# AlwaysIncludeDir = /home/user/bigfiles/path
+# AlwaysIncludeDir = /home/user/bigfiles/path/to
+# AlwaysIncludeDir = /home/user/bigfiles/path/important
+# AlwaysIncludeDir = /home/user/bigfiles/path/important/files
+# AlwaysIncludeDirsRegex = ^/home/user/bigfiles/path/important/files/
+# AlwaysIncludeFilesRegex = ^/home/user/bigfiles/path/important/files/
#
-# If a directive ends in Regex, then it is a regular expression rather than a
+# If a directive ends in Regex, then it is a regular expression rather than an
# explicit full pathname. See
#
# man 7 re_format
#
# for the regex syntax on your platform.
-#
BackupLocations
{
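The generated configuration now also documents KeepAliveTime (see the lazy-mode block above): when the connection would otherwise sit idle during a long diff or directory scan, the client should send keep-alive traffic every KeepAliveTime seconds. The timing side of that is straightforward; the sketch below uses a hypothetical SendKeepAlive() hook and is not the Box Backup implementation:

#include <ctime>

// Hypothetical keep-alive timer: call Poll() regularly from long-running
// work (diffing, large directory scans); it invokes the supplied callback
// once KeepAliveTime seconds have passed since the last real message.
class KeepAliveTimer
{
public:
	typedef void (*SendKeepAliveFn)(); // hypothetical "send keep-alive" hook

	KeepAliveTimer(int KeepAliveTimeSeconds, SendKeepAliveFn pSend)
	: mKeepAliveTime(KeepAliveTimeSeconds),
	  mpSend(pSend),
	  mLastMessageTime(std::time(0))
	{ }

	// Record that real traffic was just sent or received.
	void NotifyTraffic() { mLastMessageTime = std::time(0); }

	// Called periodically; 0 disables keep-alives, matching the default.
	void Poll()
	{
		if (mKeepAliveTime <= 0) return;
		if (std::difftime(std::time(0), mLastMessageTime) >= mKeepAliveTime)
		{
			mpSend();
			NotifyTraffic();
		}
	}

private:
	int mKeepAliveTime;
	SendKeepAliveFn mpSend;
	std::time_t mLastMessageTime;
};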
diff --git a/bin/bbackupd/bbackupd-config.in b/bin/bbackupd/bbackupd-config.in
new file mode 100755
index 00000000..adb5d5e6
--- /dev/null
+++ b/bin/bbackupd/bbackupd-config.in
@@ -0,0 +1,599 @@
+#!@PERL@
+use strict;
+
+# should be running as root
+if($> != 0)
+{
+ printf "\nWARNING: this should be run as root\n\n"
+}
+
+sub error_print_usage
+{
+ print <<__E;
+
+Setup bbackupd config utility.
+
+Bad command line parameters.
+Usage:
+ bbackupd-config config-dir backup-mode account-num server-hostname working-dir backup-dir [more backup directories]
+
+config-dir usually /etc/box
+backup-mode is lazy or snapshot
+ lazy mode runs continuously, uploading files over a specified age
+ snapshot mode uploads a snapshot of the filesystem when instructed explicitly
+account-num (hexadecimal) and server-hostname as supplied by the server administrator
+working-dir usually /var/bbackupd
+backup-dir, list of directories to back up
+
+__E
+ print "=========\nERROR:\n",$_[0],"\n\n" if $_[0] ne '';
+ exit(1);
+}
+
+# check and get command line parameters
+if($#ARGV < 4)
+{
+ error_print_usage();
+}
+
+# check for OPENSSL_CONF environment var being set
+if(exists $ENV{'OPENSSL_CONF'})
+{
+ print <<__E;
+
+---------------------------------------
+
+WARNING:
+ You have the OPENSSL_CONF environment variable set.
+ Use of non-standard openssl configs may cause problems.
+
+---------------------------------------
+
+__E
+}
+
+# default locations
+my $default_config_location = '/etc/box/bbackupd.conf';
+
+# command line parameters
+my ($config_dir,$backup_mode,$account_num,$server,$working_dir,@tobackup) = @ARGV;
+
+# check backup mode is valid
+if($backup_mode ne 'lazy' && $backup_mode ne 'snapshot')
+{
+ error_print_usage("ERROR: backup mode must be 'lazy' or 'snapshot'");
+}
+
+# check server exists
+{
+ my @r = gethostbyname($server);
+ if($#r < 0)
+ {
+ error_print_usage("Backup server specified as '$server', but it could not be found.\n(A test DNS lookup failed -- check arguments)");
+ }
+}
+
+if($working_dir !~ m~\A/~)
+{
+ error_print_usage("Working directory $working_dir is not specified as an absolute path");
+}
+
+# ssl stuff
+my $private_key = "$config_dir/bbackupd/$account_num-key.pem";
+my $certificate_request = "$config_dir/bbackupd/$account_num-csr.pem";
+my $certificate = "$config_dir/bbackupd/$account_num-cert.pem";
+my $ca_root_cert = "$config_dir/bbackupd/serverCA.pem";
+
+# encryption keys
+my $enc_key_file = "$config_dir/bbackupd/$account_num-FileEncKeys.raw";
+
+# other files
+my $config_file = "$config_dir/bbackupd.conf";
+my $notify_script = "$config_dir/bbackupd/NotifySysadmin.sh";
+
+# check that the directories are allowable
+for(@tobackup)
+{
+ if($_ eq '/')
+ {
+ die "It is not recommended that you backup the root directory of your disc";
+ }
+ if($_ !~ m/\A\//)
+ {
+ die "Directory $_ is not specified as an absolute path";
+ }
+ if(!-d $_)
+ {
+ die "$_ is not a directory";
+ }
+}
+
+# summarise configuration
+
+print <<__E;
+
+Setup bbackupd config utility.
+
+Configuration:
+ Writing configuration file: $config_file
+ Account: $account_num
+ Server hostname: $server
+ Directories to back up:
+__E
+print ' ',$_,"\n" for(@tobackup);
+print <<__E;
+
+Note: If other file systems are mounted inside these directories, then
+they will NOT be backed up. You will have to create separate locations for
+any mounted filesystems inside your backup locations.
+
+WARNING: Directories not checked against mountpoints. Check mounted filesystems manually.
+
+__E
+
+# create directories
+if(!-d $config_dir)
+{
+ printf "Creating $config_dir...\n";
+ mkdir $config_dir,0755 or die "Can't create $config_dir";
+}
+
+if(!-d "$config_dir/bbackupd")
+{
+ printf "Creating $config_dir/bbackupd\n";
+ mkdir "$config_dir/bbackupd",0700 or die "Can't create $config_dir/bbackupd";
+}
+
+if(!-d "$working_dir")
+{
+ printf "Creating $working_dir\n";
+ if(!mkdir($working_dir,0700))
+ {
+ die "Couldn't create $working_dir -- create this manually and try again\n";
+ }
+}
+
+# generate the private key for the server
+if(!-f $private_key)
+{
+ print "Generating private key...\n";
+ if(system("openssl genrsa -out $private_key 2048") != 0)
+ {
+ die "Couldn't generate private key."
+ }
+}
+
+# generate a certificate request
+if(!-f $certificate_request)
+{
+ die "Couldn't run openssl for CSR generation" unless
+ open(CSR,"|openssl req -new -key $private_key -sha1 -out $certificate_request");
+ print CSR <<__E;
+.
+.
+.
+.
+.
+BACKUP-$account_num
+.
+.
+.
+
+__E
+ close CSR;
+ print "\n\n";
+ die "Certificate request wasn't created.\n" unless -f $certificate_request
+}
+
+# generate the key material for the file
+if(!-f $enc_key_file)
+{
+ print "Generating keys for file backup\n";
+ if(system("openssl rand -out $enc_key_file 1024") != 0)
+ {
+ die "Couldn't generate file backup keys."
+ }
+}
+
+# write the notify when store full script
+print "Writing notify script $notify_script\n";
+open NOTIFY,">$notify_script" or die "Can't open for writing";
+
+my $hostname = `hostname`; chomp $hostname;
+my $current_username = `whoami`; chomp $current_username;
+my $sendmail = `whereis sendmail`; chomp $sendmail;
+$sendmail =~ s/\n.\Z//s;
+# for Linux style whereis
+$sendmail = $1 if $sendmail =~ /^sendmail:\s+([\S]+)/;
+# last ditch guess
+$sendmail = 'sendmail' if $sendmail !~ m/\S/;
+
+print NOTIFY <<__EOS;
+#!/bin/sh
+
+# This script is run whenever bbackupd changes state or encounters a
+# problem which requires the system administrator to assist:
+#
+# 1) The store is full, and no more data can be uploaded.
+# 2) Some files or directories were not readable.
+# 3) A backup run starts or finishes.
+#
+# The default script emails the system administrator, except for backups
+# starting and stopping, where it does nothing.
+
+SUBJECT="BACKUP PROBLEM on host $hostname"
+SENDTO="$current_username"
+
+if [ "\$1" = "" ]; then
+ echo "Usage: $0 <store-full|read-error|backup-error|backup-start|backup-finish>" >&2
+ exit 2
+elif [ "\$1" = store-full ]; then
+ $sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (store full)
+To: \$SENDTO
+
+
+The store account for $hostname is full.
+
+=============================
+FILES ARE NOT BEING BACKED UP
+=============================
+
+Please adjust the limits on account $account_num on server $server.
+
+EOM
+elif [ "\$1" = read-error ]; then
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (read errors)
+To: \$SENDTO
+
+
+Errors occurred reading some files or directories for backup on $hostname.
+
+===================================
+THESE FILES ARE NOT BEING BACKED UP
+===================================
+
+Check the logs on $hostname for the files and directories which caused
+these errors, and take appropriate action.
+
+Other files are being backed up.
+
+EOM
+elif [ "\$1" = backup-start -o "\$1" = backup-finish ]; then
+ # do nothing by default
+ true
+else
+$sendmail \$SENDTO <<EOM
+Subject: \$SUBJECT (unknown)
+To: \$SENDTO
+
+
+The backup daemon on $hostname reported an unknown error.
+
+==========================
+FILES MAY NOT BE BACKED UP
+==========================
+
+Please check the logs on $hostname.
+
+EOM
+fi
+__EOS
+
+close NOTIFY;
+chmod 0700,$notify_script or die "Can't chmod $notify_script";
+
+
+# write the configuration file
+print "Writing configuration file $config_file\n";
+open CONFIG,">$config_file" or die "Can't open config file for writing";
+print CONFIG <<__E;
+
+StoreHostname = $server
+AccountNumber = 0x$account_num
+KeysFile = $enc_key_file
+
+CertificateFile = $certificate
+PrivateKeyFile = $private_key
+TrustedCAsFile = $ca_root_cert
+
+DataDirectory = $working_dir
+
+
+# This script is run whenever bbackupd changes state or encounters a
+# problem which requires the system administrator to assist:
+#
+# 1) The store is full, and no more data can be uploaded.
+# 2) Some files or directories were not readable.
+# 3) A backup run starts or finishes.
+#
+# The default script emails the system administrator, except for backups
+# starting and stopping, where it does nothing.
+
+NotifyScript = $notify_script
+
+__E
+
+if($backup_mode eq 'lazy')
+{
+ # lazy mode configuration
+ print CONFIG <<__E;
+
+# The number of seconds between backup runs under normal conditions. To avoid
+# cycles of load on the server, this time is randomly adjusted by a small
+# percentage as the daemon runs.
+
+UpdateStoreInterval = 3600
+
+
+# The minimum age of a file, in seconds, that will be uploaded. Avoids
+# repeated uploads of a file which is constantly being modified.
+
+MinimumFileAge = 21600
+
+
+# If a file is modified repeatedly, it won't be uploaded immediately in case
+# it's modified again, due to the MinimumFileAge specified above. However, it
+# should be uploaded eventually even if it is being modified repeatedly. This
+# is how long we should wait, in seconds, after first noticing a change.
+# (86400 seconds = 1 day)
+
+MaxUploadWait = 86400
+
+# If the connection is idle for some time (e.g. over 10 minutes or 600
+# seconds, not sure exactly how long) then the server will give up and
+# disconnect the client, resulting in Connection Protocol_Timeout errors
+# on the server and TLSReadFailed or TLSWriteFailed errors on the client.
+# Also, some firewalls and NAT gateways will kill idle connections after
+# similar lengths of time.
+#
+# This can happen for example when most files are backed up already and
+# don't need to be sent to the store again, while scanning a large
+# directory, or while calculating diffs of a large file. To avoid this,
+# KeepAliveTime specifies that special keep-alive messages should be sent
+# when the connection is otherwise idle for a certain length of time,
+# specified here in seconds.
+#
+# The default is that these messages are never sent, equivalent to setting
+# this option to zero, but we recommend that all users enable this.
+
+KeepAliveTime = 120
+
+__E
+}
+else
+{
+ # snapshot configuration
+ print CONFIG <<__E;
+
+# This configuration file is written for snapshot mode.
+# You will need to run bbackupctl to instruct the daemon to upload files.
+
+AutomaticBackup = no
+UpdateStoreInterval = 0
+MinimumFileAge = 0
+MaxUploadWait = 0
+
+__E
+}
+
+print CONFIG <<__E;
+
+# Files above this size (in bytes) are tracked, and if they are renamed they will simply be
+# renamed on the server, rather than being uploaded again. (64k - 1)
+
+FileTrackingSizeThreshold = 65535
+
+
+# The daemon does "changes only" uploads for files above this size (in bytes).
+# Files less than it are uploaded whole without this extra processing.
+
+DiffingUploadSizeThreshold = 8192
+
+
+# The limit on how much time is spent diffing files, in seconds. Most files
+# shouldn't take very long, but if you have really big files you can use this
+# to limit the time spent diffing them.
+#
+# * Reduce if you are having problems with processor usage.
+#
+# * Increase if you have large files, and think the upload of changes is too
+# large and you want bbackupd to spend more time searching for unchanged
+# blocks.
+
+MaximumDiffingTime = 120
+
+
+# Uncomment this line to see exactly what the daemon is doing when it's connected to the server.
+
+# ExtendedLogging = yes
+
+
+# This specifies a program or script which is run just before each
+# sync, and ideally the full path to the interpreter. It will be run as the
+# same user bbackupd is running as, usually root.
+#
+# The script must output (print) either "now" or a number to STDOUT (and a
+# terminating newline, no quotes).
+#
+# If the result was "now", then the sync will happen. If it's a number, then
+# no backup will happen for that number of seconds (bbackupd will pause) and
+# then the script will be run again.
+#
+# Use this to temporarily stop bbackupd from synchronising or connecting to the
+# store. For example, you could use this on a laptop to only backup when on a
+# specific network, or when it has a working Internet connection.
+
+# SyncAllowScript = /path/to/interpreter/or/exe script-name parameters etc
+
+
+# Where the command socket is created in the filesystem.
+
+CommandSocket = /var/run/bbackupd.sock
+
+# Uncomment the StoreObjectInfoFile to enable the experimental archiving
+# of the daemon's state (including client store marker and configuration)
+# between backup runs. This saves time and increases efficiency when
+# bbackupd is frequently stopped and started, since it removes the need
+# to rescan all directories on the remote server. However, it is new and
+# not yet heavily tested, so use with caution.
+
+# StoreObjectInfoFile = $working_dir/bbackupd.state
+
+Server
+{
+ PidFile = /var/run/bbackupd.pid
+}
+
+
+# BackupLocations specifies which locations on disc should be backed up. Each
+# directory is in the format
+#
+# name
+# {
+# Path = /path/of/directory
+# (optional exclude directives)
+# }
+#
+# 'name' is derived from the Path by the config script, but should merely be
+# unique.
+#
+# The exclude directives are of the form
+#
+# [Exclude|AlwaysInclude][File|Dir][|sRegex] = regex or full pathname
+#
+# (The regex suffix is shown as 'sRegex' to make File or Dir plural)
+#
+# For example:
+#
+# ExcludeDir = /home/guest-user
+# ExcludeFilesRegex = \.(mp3|MP3)\$
+# AlwaysIncludeFile = /home/username/veryimportant.mp3
+#
+# This excludes the directory /home/guest-user from the backup along with all mp3
+# files, except one MP3 file in particular.
+#
+# In general, Exclude excludes a file or directory, unless the directory is
+# explicitly mentioned in an AlwaysInclude directive. However, Box Backup
+# does NOT scan inside excluded directories and will never back up an
+# AlwaysIncluded file or directory inside an excluded directory or any
+# subdirectory thereof.
+#
+# To back up a directory inside an excluded directory, use a configuration
+# like this, to ensure that each directory in the path to the important
+# files is included, but none of their contents will be backed up except
+# the directories further down that path to the important one.
+#
+# ExcludeDirsRegex = ^/home/user/bigfiles/
+# ExcludeFilesRegex = ^/home/user/bigfiles/
+# AlwaysIncludeDir = /home/user/bigfiles/path
+# AlwaysIncludeDir = /home/user/bigfiles/path/to
+# AlwaysIncludeDir = /home/user/bigfiles/path/important
+# AlwaysIncludeDir = /home/user/bigfiles/path/important/files
+# AlwaysIncludeDirsRegex = ^/home/user/bigfiles/path/important/files/
+# AlwaysIncludeFilesRegex = ^/home/user/bigfiles/path/important/files/
+#
+# If a directive ends in Regex, then it is a regular expression rather than an
+# explicit full pathname. See
+#
+# man 7 re_format
+#
+# for the regex syntax on your platform.
+
+BackupLocations
+{
+__E
+
+# write the dirs to backup
+for my $d (@tobackup)
+{
+ $d =~ m/\A.(.+)\Z/;
+ my $n = $1;
+ $n =~ tr`/`-`;
+
+ my $excludekeys = '';
+ if(substr($enc_key_file, 0, length($d)+1) eq $d.'/')
+ {
+ $excludekeys = "\t\tExcludeFile = $enc_key_file\n";
+ print <<__E;
+
+NOTE: Keys file has been explicitly excluded from the backup.
+
+__E
+ }
+
+ print CONFIG <<__E
+ $n
+ {
+ Path = $d
+$excludekeys }
+__E
+}
+
+print CONFIG "}\n\n";
+close CONFIG;
+
+# explain to the user what they need to do next
+my $daemon_args = ($config_file eq $default_config_location)?'':" $config_file";
+my $ctl_daemon_args = ($config_file eq $default_config_location)?'':" -c $config_file";
+
+print <<__E;
+
+===================================================================
+
+bbackupd basic configuration complete.
+
+What you need to do now...
+
+1) Make a backup of $enc_key_file
+ This should be a secure offsite backup.
+ Without it, you cannot restore backups. Everything else can
+ be replaced. But this cannot.
+ KEEP IT IN A SAFE PLACE, OTHERWISE YOUR BACKUPS ARE USELESS.
+
+2) Send $certificate_request
+ to the administrator of the backup server, and ask for it to
+ be signed.
+
+3) The administrator will send you two files. Install them as
+ $certificate
+ $ca_root_cert
+ after checking their authenticity.
+
+4) You may wish to read the configuration file
+ $config_file
+ and adjust as appropriate.
+
+ There are some notes in it on excluding files you do not
+ wish to be backed up.
+
+5) Review the script
+ $notify_script
+ and check that it will email the right person when the store
+ becomes full. This is important -- when the store is full, no
+ more files will be backed up. You want to know about this.
+
+6) Start the backup daemon with the command
+ /usr/local/bin/bbackupd$daemon_args
+ in /etc/rc.local, or your local equivalent.
+ Note that bbackupd must run as root.
+__E
+if($backup_mode eq 'snapshot')
+{
+ print <<__E;
+
+7) Set up a cron job to run whenever you want a snapshot of the
+ file system to be taken. Run the command
+ /usr/local/bin/bbackupctl -q$ctl_daemon_args sync
+__E
+}
+print <<__E;
+
+===================================================================
+
+Remember to make a secure, offsite backup of your backup keys,
+as described in step 1 above. If you do not, you have no backups.
+
+__E
+
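The new exclusion notes written into the generated configuration (both here and in bbackupd-config above) make one rule explicit: excluded directories are not scanned, so an AlwaysInclude deep inside an excluded tree only works if every directory on the path is itself AlwaysIncluded. Expressed as a predicate over path components, that rule looks roughly like this sketch (assumed semantics, not code from bbackupd):

#include <algorithm>
#include <string>
#include <vector>

// Minimal stand-in for the ExcludeDir / AlwaysIncludeDir directives above
// (the real daemon also supports the ...Regex forms).
struct ExcludeRules
{
	std::vector<std::string> excludedDirs;
	std::vector<std::string> alwaysIncludedDirs;

	bool IsExcluded(const std::string& rPath) const
	{
		return std::find(excludedDirs.begin(), excludedDirs.end(),
			rPath) != excludedDirs.end();
	}
	bool IsAlwaysIncluded(const std::string& rPath) const
	{
		return std::find(alwaysIncludedDirs.begin(),
			alwaysIncludedDirs.end(), rPath) != alwaysIncludedDirs.end();
	}
};

// True if the scan descends through every directory on the path. An excluded
// ancestor that is not itself AlwaysIncluded stops the scan, so nothing below
// it can be backed up -- which is why the example above lists every directory
// from /home/user/bigfiles/path down to .../important/files.
static bool ShouldDescend(const ExcludeRules& rRules,
	const std::vector<std::string>& rAncestorPaths)
{
	for (size_t i = 0; i < rAncestorPaths.size(); ++i)
	{
		if (rRules.IsExcluded(rAncestorPaths[i]) &&
			!rRules.IsAlwaysIncluded(rAncestorPaths[i]))
		{
			return false;
		}
	}
	return true;
}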
diff --git a/bin/bbackupd/bbackupd.cpp b/bin/bbackupd/bbackupd.cpp
index 1ccfc7f8..c8320454 100644
--- a/bin/bbackupd/bbackupd.cpp
+++ b/bin/bbackupd/bbackupd.cpp
@@ -1,4 +1,4 @@
-// distribution boxbackup-0.10 (svn version: 494)
+// distribution boxbackup-0.11rc1 (svn version: 2023_2024)
//
// Copyright (c) 2003 - 2006
// Ben Summers and contributors. All rights reserved.
@@ -50,6 +50,7 @@
#include "MainHelper.h"
#include "BoxPortsAndFiles.h"
#include "BackupStoreException.h"
+#include "Logging.h"
#include "MemLeakFindOn.h"
@@ -57,82 +58,37 @@
#include "Win32ServiceFunctions.h"
#include "Win32BackupService.h"
- extern Win32BackupService gDaemonService;
+ extern Win32BackupService* gpDaemonService;
#endif
int main(int argc, const char *argv[])
{
- MAINHELPER_START
-
-#ifdef WIN32
-
- ::openlog("Box Backup (bbackupd)", 0, 0);
+ int ExitCode = 0;
- if(argc == 2 &&
- (::strcmp(argv[1], "--help") == 0 ||
- ::strcmp(argv[1], "-h") == 0))
- {
- printf("-h help, -i install service, -r remove service,\n"
- "-c <config file> start daemon now");
- return 2;
- }
- if(argc == 2 && ::strcmp(argv[1], "-r") == 0)
- {
- RemoveService();
- return 0;
- }
- if(argc == 2 && ::strcmp(argv[1], "-i") == 0)
- {
- InstallService();
- return 0;
- }
+ MAINHELPER_START
- bool runAsWin32Service = false;
- if (argc == 2 && ::strcmp(argv[1], "--service") == 0)
- {
- runAsWin32Service = true;
- }
+ Logging::SetProgramName("Box Backup (bbackupd)");
+ Logging::ToConsole(true);
+ Logging::ToSyslog (true);
- // Under win32 we must initialise the Winsock library
- // before using sockets
-
- WSADATA info;
-
- if (WSAStartup(0x0101, &info) == SOCKET_ERROR)
- {
- // box backup will not run without sockets
- ::syslog(LOG_ERR, "Failed to initialise Windows Sockets");
- THROW_EXCEPTION(BackupStoreException, Internal)
- }
+#ifdef WIN32
EnableBackupRights();
- int ExitCode = 0;
-
- if (runAsWin32Service)
- {
- syslog(LOG_INFO,"Starting Box Backup Service");
- OurService();
- }
- else
- {
- ExitCode = gDaemonService.Main(
- BOX_FILE_BBACKUPD_DEFAULT_CONFIG, argc, argv);
- }
-
- // Clean up our sockets
- WSACleanup();
-
- ::closelog();
-
- return ExitCode;
+ gpDaemonService = new Win32BackupService();
+ ExitCode = gpDaemonService->Daemon::Main(
+ BOX_GET_DEFAULT_BBACKUPD_CONFIG_FILE,
+ argc, argv);
+ delete gpDaemonService;
#else // !WIN32
BackupDaemon daemon;
- return daemon.Main(BOX_FILE_BBACKUPD_DEFAULT_CONFIG, argc, argv);
+ ExitCode = daemon.Main(BOX_FILE_BBACKUPD_DEFAULT_CONFIG, argc, argv);
#endif // WIN32
MAINHELPER_END
+
+ return ExitCode;
}
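The rewritten main() replaces the direct openlog()/syslog() calls with the common Logging framework, naming the program once and enabling console and syslog output up front, which presumably routes the BOX_* macros used throughout this changeset to both destinations. Another tool built against the same libraries could follow the same pattern; a minimal sketch (assumed to build inside the Box Backup tree):

#include "Box.h"

#include "Logging.h"

#include "MemLeakFindOn.h"

int main(int argc, const char *argv[])
{
	// Same initialisation pattern as the new bbackupd main() above.
	Logging::SetProgramName("Box Backup (example tool)");
	Logging::ToConsole(true);
	Logging::ToSyslog (true);

	BOX_INFO("Logging initialised");
	return 0;
}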
diff --git a/bin/bbackupd/win32/NotifySysAdmin.vbs b/bin/bbackupd/win32/NotifySysAdmin.vbs
new file mode 100644
index 00000000..49082887
--- /dev/null
+++ b/bin/bbackupd/win32/NotifySysAdmin.vbs
@@ -0,0 +1,95 @@
+Dim hostname
+Dim account
+Dim from
+Dim sendto
+Dim subjtmpl
+Dim subject
+Dim body
+Dim smtpserver
+
+Set WshNet = CreateObject("WScript.Network")
+hostname = WshNet.ComputerName
+
+account = "0a1"
+from = "boxbackup@" & hostname
+sendto = "admin@example.com"
+subjtmpl = "BACKUP PROBLEM on host " & hostname
+smtpserver = "smtp.example.com"
+
+Set args = WScript.Arguments
+
+If args(0) = "store-full" Then
+ subject = subjtmpl & " (store full)"
+ body = "The store account for "&hostname&" is full." & vbCrLf & vbCrLf & _
+ "=============================" & vbCrLf & _
+ "FILES ARE NOT BEING BACKED UP" & vbCrLf & _
+ "=============================" & vbCrLf & vbCrLf & _
+ "Please adjust the limits on account "&account&" on server "&hostname&"." _
+ & vbCrLf
+ SendMail from,sendto,subject,body
+ElseIf args(0) = "read-error" Then
+ subject = subjtmpl & " (read errors)"
+ body = "Errors occurred reading some files or directories for backup on "&hostname&"." _
+ & vbCrLf & vbCrLf & _
+ "===================================" & vbCrLf & _
+ "THESE FILES ARE NOT BEING BACKED UP" & vbCrLf & _
+ "===================================" & vbCrLf & vbCrLf & _
+ "Check the logs on "&hostname&" for the files and directories which caused " & _
+ "these errors, and take appropriate action." & vbCrLf & vbCrLf & _
+ "Other files are being backed up." & vbCrLf
+ SendMail from,sendto,subject,body
+ElseIf args(0) = "backup-start" Or args(0) = "backup-finish" Then
+ ' do nothing for these messages by default
+Else
+ subject = subjtmpl & " (unknown)"
+ body = "The backup daemon on "&hostname&" reported an unknown error." _
+ & vbCrLf & vbCrLf & _
+ "==========================" & vbCrLf & _
+ "FILES MAY NOT BE BACKED UP" & vbCrLf & _
+ "==========================" & vbCrLf & vbCrLf & _
+ "Please check the logs on "&hostname&"." & vbCrLf
+ SendMail from,sendto,subject,body
+End If
+
+Function CheckSMTPSvc()
+ Set objWMISvc = GetObject("winmgmts:" _
+ & "{impersonationLevel=impersonate}!\\.\root\cimv2")
+ Set colSMTPSvc = objWMISvc.ExecQuery("Select * From Win32_Service " _
+ & "Where Name='SMTPSVC'")
+ If colSMTPSvc.Count > 0 Then
+ CheckSMTPSvc = True
+ Else
+ CheckSMTPSvc = False
+ End If
+End Function
+
+Sub SendMail(from,sendto,subject,body)
+ Set objEmail = CreateObject("CDO.Message")
+ Set WshShell = CreateObject("WScript.Shell")
+ Dim cdoschema
+ cdoschema = "http://schemas.microsoft.com/cdo/configuration/"
+
+ With objEmail
+ .From = from
+ .To = sendto
+ .Subject = subject
+ .TextBody = body
+ If CheckSMTPSvc = False Then
+ .Configuration.Fields.Item(cdoschema & "sendusing") = 2
+ .Configuration.Fields.Item(cdoschema & "smtpserver") = smtpserver
+ .Configuration.Fields.Item(cdoschema & "smtpserverport") = 25
+ .Configuration.Fields.Update
+ End If
+ End With
+ On Error Resume Next
+ rc = objEmail.Send
+ If rc Then
+ WshShell.Exec "eventcreate /L Application /ID 201 /T WARNING " _
+ & "/SO ""Box Backup"" /D """ & args(0) _
+ & " notification sent to " & sendto & "."""
+ Else
+ WshShell.Exec "eventcreate /L Application /ID 202 /T ERROR " _
+ & "/SO ""Box Backup"" /D ""Failed to send " & args(0) _
+ & " notification to " & sendto & "."""
+ End If
+End Sub
diff --git a/bin/bbackupd/win32/ReadMe.txt b/bin/bbackupd/win32/ReadMe.txt
deleted file mode 100644
index 3d260750..00000000
--- a/bin/bbackupd/win32/ReadMe.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Upgrade instructions
-
-Version 0.09g to 0.09h
-
-This version included patches to the server as well. The server for this
-upgrade can be found at http://home.earthlink.net/~gniemcew/ but hopefully
-will be merged into the core in the next release.
-
-New values in the bbackupd.conf can now be added:
-
-StoreObjectInfoFile = C:\Program Files\Box Backup\bbackupd\bbackupd.dat
-
-This stores the state when a backup daemon is shutdown.
-
-KeepAliveTime = 250
-
-This is imperative if MaximumDiffingTime is larger than 300, this stops the ssl
-layer timing out when a diff is performed. It is wise to set MaximumDiffingTime
-long enough for the largest file you may have. If you do not wish to upgrade your
-server then make KeepAliveTime greater than MaximumDiffingTime.
-
-Have fun
-
-Nick
diff --git a/bin/bbackupd/win32/bbackupd.conf b/bin/bbackupd/win32/bbackupd.conf
index 85915520..6c987f7d 100644
--- a/bin/bbackupd/win32/bbackupd.conf
+++ b/bin/bbackupd/win32/bbackupd.conf
@@ -12,34 +12,69 @@ DataDirectory = C:\Program Files\Box Backup\bbackupd
# If you do not install it in the default location - also do not forget to
# change the pid file location (below)
-
-# This script is run whenever bbackupd encounters a problem which requires
-# the system administrator to assist:
+# This script is run whenever bbackupd changes state or encounters a
+# problem which requires the system administrator to assist:
+#
# 1) The store is full, and no more data can be uploaded.
# 2) Some files or directories were not readable.
-# The default script emails the system administrator.
+# 3) A backup run starts or finishes.
+#
+# The default script emails the system administrator, except for backups
+# starting and stopping, where it does nothing.
+#
+# NOTE: You need to edit the following variables in the script before
+# enabling it:
+#
+# account = "accountnumber"
+# sendto = "your@email.address"
+# smtpserver = "your.smtp.server"
+#
+# You do not need to set smtpserver if the client has the SMTP Service
+# installed; the script will connect directly to the SMTP service.
-# NotifyScript = NotifySysadmin.sh
+NotifyScript = cscript "C:\Program Files\Box Backup\NotifySysAdmin.vbs"
-# A scan of the local discs will be made once an hour (approximately).
-# To avoid cycles of load on the server, this time is randomly adjusted by a small
+# The number of seconds between backup runs under normal conditions. To avoid
+# cycles of load on the server, this time is randomly adjusted by a small
# percentage as the daemon runs.
UpdateStoreInterval = 3600
-# A file must have been modified at least 6 hours ago before it will be uploaded.
+# The minimum age of a file, in seconds, that will be uploaded. Avoids
+# repeated uploads of a file which is constantly being modified.
MinimumFileAge = 21600
-# If a file is modified repeated, it won't be uploaded immediately in case it's modified again.
-# However, it should be uploaded eventually. This is how long we should wait after first noticing
-# a change. (1 day)
+# If a file is modified repeatedly, it won't be uploaded immediately in case
+# it's modified again, due to the MinimumFileAge specified above. However, it
+# should be uploaded eventually even if it is being modified repeatedly. This
+# is how long we should wait, in seconds, after first noticing a change.
+# (86400 seconds = 1 day)
MaxUploadWait = 86400
+# If the connection is idle for some time (e.g. over 10 minutes or 600
+# seconds, not sure exactly how long) then the server will give up and
+# disconnect the client, resulting in Connection Protocol_Timeout errors
+# on the server and TLSReadFailed or TLSWriteFailed errors on the client.
+# Also, some firewalls and NAT gateways will kill idle connections after
+# similar lengths of time.
+#
+# This can happen for example when most files are backed up already and
+# don't need to be sent to the store again, while scanning a large
+# directory, or while calculating diffs of a large file. To avoid this,
+# KeepAliveTime specifies that special keep-alive messages should be sent
+# when the connection is otherwise idle for a certain length of time,
+# specified here in seconds.
+#
+# The default is that these messages are never sent, equivalent to setting
+# this option to zero, but we recommend that all users enable this.
+
+KeepAliveTime = 120
+
# Files above this size (in bytes) are tracked, and if they are renamed they will simply be
# renamed on the server, rather than being uploaded again. (64k - 1)
@@ -53,30 +88,38 @@ FileTrackingSizeThreshold = 65535
DiffingUploadSizeThreshold = 8192
-# The limit on how much time is spent diffing files. Most files shouldn't take very long,
-# but if you have really big files you can use this to limit the time spent diffing them.
+# The limit on how much time is spent diffing files, in seconds. Most files
+# shouldn't take very long, but if you have really big files you can use this
+# to limit the time spent diffing them.
+#
# * Reduce if you are having problems with processor usage.
-# * Increase if you have large files, and think the upload of changes is too large and want
-# to spend more time searching for unchanged blocks.
+#
+# * Increase if you have large files, and think the upload of changes is too
+# large and you want bbackupd to spend more time searching for unchanged
+# blocks.
-MaximumDiffingTime = 20
+MaximumDiffingTime = 120
-# KeepAliveTime requires Gary's SSL KeepAlive patches
-# KeepAliveTime = 250
# Uncomment this line to see exactly what the daemon is doing when it's connected to the server.
# ExtendedLogging = yes
-# Use this to temporarily stop bbackupd from syncronising or connecting to the store.
-# This specifies a program or script script which is run just before each sync, and ideally
-# the full path to the interpreter. It will be run as the same user bbackupd is running as,
-# usually root.
-# The script prints either "now" or a number to STDOUT (and a terminating newline, no quotes).
-# If the result was "now", then the sync will happen. If it's a number, then the script will
-# be asked again in that number of seconds.
-# For example, you could use this on a laptop to only backup when on a specific network.
+# This specifies a program or script which is run just before each sync,
+# and should ideally give the full path to the interpreter. It will be run
+# as the same user bbackupd is running as, usually root.
+#
+# The script must output (print) either "now" or a number to STDOUT (and a
+# terminating newline, no quotes).
+#
+# If the result is "now", then the sync will happen. If it's a number, then
+# no backup will happen for that number of seconds (bbackupd will pause) and
+# then the script will be run again.
+#
+# Use this to temporarily stop bbackupd from synchronising or connecting to the
+# store. For example, you could use this on a laptop to only back up when on a
+# specific network, or when it has a working Internet connection.
# SyncAllowScript = /path/to/interpreter/or/exe script-name parameters etc
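+#
+# As a rough, hypothetical sketch (the script name, paths and host below
+# are examples only, not part of this configuration), such a script could
+# be written in Python:
+#
+#     # sync-allow.py: print "now" to allow the sync, or a number of
+#     # seconds for bbackupd to wait before asking again.
+#     import socket
+#     try:
+#         # Allow the sync only if the backup server is reachable.
+#         socket.create_connection(("backup.example.com", 2201), 5).close()
+#         print("now")
+#     except OSError:
+#         print(300)
+#
+# and configured with a line such as:
+#
+# SyncAllowScript = C:\Python\python.exe "C:\Program Files\Box Backup\sync-allow.py"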
@@ -85,16 +128,21 @@ MaximumDiffingTime = 20
CommandSocket = pipe
+# The StoreObjectInfoFile setting below enables the experimental archiving
+# of the daemon's state (including client store marker and configuration)
+# between backup runs. This saves time and increases efficiency when
+# bbackupd is frequently stopped and started, since it removes the need
+# to rescan all directories on the remote server. However, it is new and
+# not yet heavily tested, so use with caution, or comment it out to disable.
+
+StoreObjectInfoFile = C:\Program Files\Box Backup\bbackupd\bbackupd.state
Server
{
PidFile = C:\Program Files\Box Backup\bbackupd\bbackupd.pid
}
-# StoreObjectInfoFile requires Gary's client marker serialisation patch
-# StoreObjectInfoFile = C:\Program Files\Box Backup\bbackupd\bbackupd.dat
-#
# BackupLocations specifies which locations on disc should be backed up. Each
# directory is in the format
#
@@ -116,22 +164,69 @@ Server
# For example:
#
# ExcludeDir = /home/guest-user
-# ExcludeFilesRegex = *.(mp3|MP3)$
+# ExcludeFilesRegex = \.(mp3|MP3)$
# AlwaysIncludeFile = /home/username/veryimportant.mp3
#
# This excludes the directory /home/guest-user from the backup along with all mp3
# files, except one MP3 file in particular.
-#
-# In general, Exclude excludes a file or directory, unless the directory is
-# explicitly mentioned in a AlwaysInclude directive.
-#
+#
# If a directive ends in Regex, then it is a regular expression rather than an
-# explicit full pathname. See
-#
-# man 7 re_format
-#
-# for the regex syntax on your platform.
-#
+# explicit full pathname. See:
+#
+# http://bbdev.fluffy.co.uk/trac/wiki/Win32Regex
+#
+# for more information about regular expressions on Windows.
+#
+# In general, Exclude excludes a file or directory, unless the directory is
+# explicitly mentioned in an AlwaysInclude directive. However, Box Backup
+# does NOT scan inside excluded directories and will never back up an
+# AlwaysIncluded file or directory inside an excluded directory or any
+# subdirectory thereof.
+#
+# To back up a directory inside an excluded directory, use a configuration
+# like the one below. Each directory on the path down to the important
+# files is explicitly included, but none of their other contents are backed
+# up; only the important files and directories at the end of that path are.
+#
+# ExcludeDirsRegex = ^/home/user/bigfiles/
+# ExcludeFilesRegex = ^/home/user/bigfiles/
+# AlwaysIncludeDir = /home/user/bigfiles/path
+# AlwaysIncludeDir = /home/user/bigfiles/path/to
+# AlwaysIncludeDir = /home/user/bigfiles/path/to/important
+# AlwaysIncludeDir = /home/user/bigfiles/path/to/important/files
+# AlwaysIncludeDirsRegex = ^/home/user/bigfiles/path/to/important/files/
+# AlwaysIncludeFilesRegex = ^/home/user/bigfiles/path/to/important/files/
+#
+# Here are some more examples of possible regular expressions for Windows:
+#
+# ExcludeDir = C:\Documents and Settings\Owner
+# ExcludeFilesRegex = \.(mp3|MP3)$
+# AlwaysIncludeFile = C:\Documents and Settings\Owner\My Documents\My Music\veryimportant.mp3
+# ExcludeFilesRegex = \.pst$
+# AlwaysIncludeFilesRegex = \.*backup.*\.pst$
+# ExcludeFilesRegex = \.avi$
+# ExcludeDirsRegex = \\Temporary Internet Files$
+# ExcludeFilesRegex = \\pagefile\.sys$
+# ExcludeDirsRegex = \\pagefile\.sys$
+# ExcludeFilesRegex = \\boot\.ini$
+# ExcludeFilesRegex = \\NTDETECT\.COM$
+# ExcludeFilesRegex = \\UsrClass\.dat\.LOG$
+# ExcludeDirsRegex = \\System Volume Information$
+# ExcludeFilesRegex = \\ntldr$
+# ExcludeDirsRegex = \\Local Settings\\.*\\Cache$
+# ExcludeFilesRegex = \\thumbs\.db$
+# ExcludeFilesRegex = \\~.*
+# ExcludeFilesRegex = \\Perflib.*
+# ExcludeDirsRegex = \\Application Data$
+# ExcludeFilesRegex = \.bk[~!0-9]$
+# ExcludeFilesRegex = \.iso$
+# ExcludeFilesRegex = \.mpe?[2345g]$
+# ExcludeFilesRegex = \.qbw$
+# AlwaysIncludeFilesRegex = \.qbb$
+# ExcludeFilesRegex = \.tiff?$
+# ExcludeFilesRegex = \.wmv$
+# ExcludeFilesRegex = \.avi$
+# ExcludeFilesRegex = \.(avi|iso|mp(e)?[g345]|bk[~!1-9]|[mt]bk)$
BackupLocations
{