path: root/lib/bbackupd
author     Reinhard Tartler <siretart@tauware.de>    2020-05-10 19:17:17 -0400
committer  Reinhard Tartler <siretart@tauware.de>    2020-05-10 19:17:17 -0400
commit     228bc06db9e262b24a6c235b1e036cc8ef78565f (patch)
tree       3703ceb787cf9345600fae3939afeccca7c47719 /lib/bbackupd
parent     ffb043c482053896b0190ea5ddc1cf2ba70cb4f0 (diff)
parent     c5c04da3823c36f03408821cbc5019ec18613922 (diff)
Diffstat (limited to 'lib/bbackupd')
-rw-r--r--  lib/bbackupd/BackupClientDirectoryRecord.cpp  330
-rw-r--r--  lib/bbackupd/BackupClientDirectoryRecord.h      5
-rw-r--r--  lib/bbackupd/BackupDaemon.cpp                 222
3 files changed, 318 insertions, 239 deletions
diff --git a/lib/bbackupd/BackupClientDirectoryRecord.cpp b/lib/bbackupd/BackupClientDirectoryRecord.cpp
index 94cb7965..50b5a4bc 100644
--- a/lib/bbackupd/BackupClientDirectoryRecord.cpp
+++ b/lib/bbackupd/BackupClientDirectoryRecord.cpp
@@ -159,6 +159,9 @@ void BackupClientDirectoryRecord::SyncDirectory(
THROW_EXCEPTION(BackupStoreException, SignalReceived)
}
+ std::string local_path_non_vss = ConvertVssPathToRealPath(rLocalPath,
+ rBackupLocation);
+
// Start by making some flag changes, marking this sync as not done,
// and on the immediate sub directories.
mSyncDone = false;
@@ -192,8 +195,7 @@ void BackupClientDirectoryRecord::SyncDirectory(
// just ignore this error. In a future scan, this
// deletion will be noticed, deleted from server,
// and this object deleted.
- rNotifier.NotifyDirStatFailed(this,
- ConvertVssPathToRealPath(rLocalPath, rBackupLocation),
+ rNotifier.NotifyDirStatFailed(this, local_path_non_vss,
strerror(errno));
return;
}
@@ -208,7 +210,7 @@ void BackupClientDirectoryRecord::SyncDirectory(
BackupClientInodeToIDMap &idMap(
rParams.mrContext.GetNewIDMap());
idMap.AddToMap(dest_st.st_ino, mObjectID, ContainingDirectoryID,
- ConvertVssPathToRealPath(rLocalPath, rBackupLocation));
+ local_path_non_vss);
}
// Add attributes to checksum
currentStateChecksum.Add(&dest_st.st_mode,
@@ -243,9 +245,7 @@ void BackupClientDirectoryRecord::SyncDirectory(
DIR *dirHandle = 0;
try
{
- std::string nonVssDirPath = ConvertVssPathToRealPath(rLocalPath,
- rBackupLocation);
- rNotifier.NotifyScanDirectory(this, nonVssDirPath);
+ rNotifier.NotifyScanDirectory(this, local_path_non_vss);
dirHandle = ::opendir(rLocalPath.c_str());
if(dirHandle == 0)
@@ -253,21 +253,17 @@ void BackupClientDirectoryRecord::SyncDirectory(
// Report the error (logs and eventual email to administrator)
if (errno == EACCES)
{
- rNotifier.NotifyDirListFailed(this,
- nonVssDirPath,
+ rNotifier.NotifyDirListFailed(this, local_path_non_vss,
"Access denied");
}
else
{
- rNotifier.NotifyDirListFailed(this,
- nonVssDirPath,
+ rNotifier.NotifyDirListFailed(this, local_path_non_vss,
strerror(errno));
}
- // Report the error (logs and eventual email
- // to administrator)
- SetErrorWhenReadingFilesystemObject(rParams,
- nonVssDirPath);
+ SetErrorWhenReadingFilesystemObject(rParams, local_path_non_vss);
+
// Ignore this directory for now.
return;
}
@@ -327,15 +323,49 @@ void BackupClientDirectoryRecord::SyncDirectory(
try
{
// Want to get the directory listing?
+ bool download_dir = false;
+
if(ThisDirHasJustBeenCreated)
{
// Avoid sending another command to the server when we know it's empty
apDirOnStore.reset(new BackupStoreDirectory(mObjectID,
ContainingDirectoryID));
+ BOX_TRACE("No need to download directory " <<
+ BOX_FORMAT_OBJECTID(mObjectID) << " because it has just been "
+ "created, so we know it's empty");
+ ASSERT(!download_dir);
}
// Consider asking the store for it
- else if(!mInitialSyncDone || checksumDifferent ||
- downloadDirectoryRecordBecauseOfFutureFiles)
+ else if(!mInitialSyncDone)
+ {
+ BOX_TRACE("Downloading directory listing of " << local_path_non_vss <<
+ " (" << BOX_FORMAT_OBJECTID(mObjectID) << " because we haven't "
+ "done an initial sync yet");
+ download_dir = true;
+ }
+ else if(checksumDifferent)
+ {
+ BOX_TRACE("Downloading directory listing of " << local_path_non_vss <<
+ " (" << BOX_FORMAT_OBJECTID(mObjectID) << " because its contents "
+ "have changed locally");
+ download_dir = true;
+ }
+ else if(downloadDirectoryRecordBecauseOfFutureFiles)
+ {
+ BOX_TRACE("Downloading directory listing of " << local_path_non_vss <<
+ " (" << BOX_FORMAT_OBJECTID(mObjectID) << " because it contains "
+ "files with dates in the future");
+ download_dir = true;
+ }
+ else
+ {
+ BOX_TRACE("Not downloading directory listing of " << local_path_non_vss <<
+ " (" << BOX_FORMAT_OBJECTID(mObjectID) << " because our cached "
+ "copy appears to still be valid");
+ ASSERT(!download_dir);
+ }
+
+ if(download_dir)
{
apDirOnStore = FetchDirectoryListing(rParams);
}
@@ -766,17 +796,17 @@ bool BackupClientDirectoryRecord::UpdateItems(
// Decrypt all the directory entries.
// It would be nice to be able to just compare the encrypted versions, however this doesn't work
- // in practise because there can be multiple encodings of the same filename using different
+ // in practise because there can be multiple encodings of the same filename using different
// methods (although each method will result in the same string for the same filename.) This
// happens when the server fixes a broken store, and gives plain text generated filenames.
// So if we didn't do things like this, then you wouldn't be able to recover from bad things
// happening with the server.
DecryptedEntriesMap_t decryptedEntries;
- if(pDirOnStore != 0)
+ if(pDirOnStore != NULL)
{
BackupStoreDirectory::Iterator i(*pDirOnStore);
- BackupStoreDirectory::Entry *en = 0;
- while((en = i.Next()) != 0)
+ BackupStoreDirectory::Entry *en = NULL;
+ while((en = i.Next()) != NULL)
{
std::string filenameClear;
try
@@ -837,9 +867,9 @@ bool BackupClientDirectoryRecord::UpdateItems(
// See if it's in the listing (if we have one)
BackupStoreFilenameClear storeFilename(*f);
- BackupStoreDirectory::Entry *en = 0;
+ BackupStoreDirectory::Entry *en = NULL;
int64_t latestObjectID = 0;
- if(pDirOnStore != 0)
+ if(pDirOnStore != NULL)
{
DecryptedEntriesMap_t::iterator i(decryptedEntries.find(*f));
if(i != decryptedEntries.end())
@@ -850,85 +880,32 @@ bool BackupClientDirectoryRecord::UpdateItems(
}
// Check that the entry which might have been found is in fact a file
- if((en != 0) && !(en->IsFile()))
+ if((en != NULL) && !(en->IsFile()))
{
// Directory exists in the place of this file -- sort it out
- RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore,
- en, *f);
- en = 0;
+ RemoveDirectoryInPlaceOfFile(rParams, pDirOnStore, en, *f);
+ en = NULL;
+ latestObjectID = 0;
}
// Check for renaming?
- if(pDirOnStore != 0 && en == 0)
+ if(pDirOnStore != 0 && en == NULL)
{
// We now know...
// 1) File has just been added
// 2) It's not in the store
-
- // Do we know about the inode number?
- const BackupClientInodeToIDMap &idMap(rContext.GetCurrentIDMap());
- int64_t renameObjectID = 0, renameInDirectory = 0;
- if(idMap.Lookup(inodeNum, renameObjectID, renameInDirectory))
+ ASSERT(latestObjectID == 0);
+ en = CheckForRename(rContext, pDirOnStore, storeFilename, inodeNum,
+ nonVssFilePath);
+ if(en != NULL)
{
- // Look up on the server to get the name, to build the local filename
- std::string localPotentialOldName;
- bool isDir = false;
- bool isCurrentVersion = false;
- box_time_t srvModTime = 0, srvAttributesHash = 0;
- BackupStoreFilenameClear oldLeafname;
- if(rContext.FindFilename(renameObjectID, renameInDirectory,
- localPotentialOldName, isDir, isCurrentVersion,
- &srvModTime, &srvAttributesHash, &oldLeafname))
- {
- // Only interested if it's a file and the latest version
- if(!isDir && isCurrentVersion)
- {
- // Check that the object we found in the ID map doesn't exist on disc
- EMU_STRUCT_STAT st;
- if(EMU_STAT(localPotentialOldName.c_str(), &st) != 0 && errno == ENOENT)
- {
- // Doesn't exist locally, but does exist on the server.
- // Therefore we can safely rename it to this new file.
-
- // Get the connection to the server
- BackupProtocolCallable &connection(rContext.GetConnection());
-
- // Only do this step if there is room on the server.
- // This step will be repeated later when there is space available
- if(!rContext.StorageLimitExceeded())
- {
- // Rename the existing files (ie include old versions) on the server
- connection.QueryMoveObject(renameObjectID,
- renameInDirectory,
- mObjectID /* move to this directory */,
- BackupProtocolMoveObject::Flags_MoveAllWithSameName |
- BackupProtocolMoveObject::Flags_AllowMoveOverDeletedObject,
- storeFilename);
-
- // Stop the attempt to delete the file in the original location
- BackupClientDeleteList &rdelList(rContext.GetDeleteList());
- rdelList.StopFileDeletion(renameInDirectory, oldLeafname);
-
- // Create new entry in the directory for it
- // -- will be near enough what's actually on the server for the rest to work.
- en = pDirOnStore->AddEntry(storeFilename,
- srvModTime, renameObjectID,
- 0 /* size in blocks unknown, but not needed */,
- BackupStoreDirectory::Entry::Flags_File,
- srvAttributesHash);
-
- // Store the object ID for the inode lookup map later
- latestObjectID = renameObjectID;
- }
- }
- }
- }
+ latestObjectID = en->GetObjectID();
}
}
-
+
// Is it in the mPendingEntries list?
box_time_t pendingFirstSeenTime = 0; // ie not seen
- if(mpPendingEntries != 0)
+ if(mpPendingEntries != NULL)
{
std::map<std::string, box_time_t>::const_iterator i(mpPendingEntries->find(*f));
if(i != mpPendingEntries->end())
@@ -939,7 +916,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
}
// If pDirOnStore == 0, then this must have been after an initial sync:
- ASSERT(pDirOnStore != 0 || mInitialSyncDone);
+ ASSERT(pDirOnStore != NULL || mInitialSyncDone);
// So, if pDirOnStore == 0, then we know that everything before syncPeriodStart
// is either on the server, or in the toupload list. If the directory had changed,
// we'd have got a directory listing.
@@ -962,20 +939,20 @@ bool BackupClientDirectoryRecord::UpdateItems(
// Only upload a file if the mod time locally is
// different to that on the server.
- if (en == 0 || en->GetModificationTime() != modTime)
+ if(en == NULL || en->GetModificationTime() != modTime)
{
// Check the file modified within the acceptable time period we're checking
// If the file isn't on the server, the acceptable time starts at zero.
// Check pDirOnStore and en, because if we didn't download a directory listing,
// pDirOnStore will be zero, but we know it's on the server.
- if (modTime < rParams.mSyncPeriodEnd)
+ if(modTime < rParams.mSyncPeriodEnd)
{
- if (pDirOnStore != 0 && en == 0)
+ if(pDirOnStore != NULL && en == NULL)
{
doUpload = true;
decisionReason = "not on server";
}
- else if (modTime >= rParams.mSyncPeriodStart)
+ else if(modTime >= rParams.mSyncPeriodStart)
{
doUpload = true;
decisionReason = "modified since last sync";
@@ -984,12 +961,19 @@ bool BackupClientDirectoryRecord::UpdateItems(
// However, just in case things are continually
// modified, we check the first seen time.
- // The two compares of syncPeriodEnd and
- // pendingFirstSeenTime are because the values
- // are unsigned.
- if (!doUpload &&
- pendingFirstSeenTime != 0 &&
+ if(!doUpload && pendingFirstSeenTime != 0)
+ {
+ BOX_TRACE("Current period ends at " <<
+ FormatTime(rParams.mSyncPeriodEnd, false, true) <<
+ " and file has been pending since " <<
+ FormatTime(pendingFirstSeenTime, false, true));
+ }
+
+ if(!doUpload && pendingFirstSeenTime != 0 &&
+ // The two compares of syncPeriodEnd and
+ // pendingFirstSeenTime are because the values
+ // are unsigned.
rParams.mSyncPeriodEnd > pendingFirstSeenTime &&
(rParams.mSyncPeriodEnd - pendingFirstSeenTime)
> rParams.mMaxUploadWait)
@@ -1005,7 +989,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
// will pick up the fact it has been added, so the
// store listing will be available when this happens.
- if (!doUpload &&
+ if(!doUpload &&
modTime <= rParams.mSyncPeriodStart &&
en != 0 &&
en->GetModificationTime() != modTime)
@@ -1014,11 +998,11 @@ bool BackupClientDirectoryRecord::UpdateItems(
decisionReason = "mod time changed";
}
- // And just to catch really badly off clocks in
+ // And just to catch really badly off clocks in
// the future for file server clients,
// just upload the file if it's madly in the future.
- if (!doUpload && modTime >
+ if(!doUpload && modTime >
rParams.mUploadAfterThisTimeInTheFuture)
{
doUpload = true;
@@ -1026,14 +1010,14 @@ bool BackupClientDirectoryRecord::UpdateItems(
}
}
- if (en != 0 && en->GetModificationTime() == modTime)
+ if(en != NULL && en->GetModificationTime() == modTime)
{
doUpload = false;
decisionReason = "not modified since last upload";
}
- else if (!doUpload)
+ else if(!doUpload)
{
- if (modTime > rParams.mSyncPeriodEnd)
+ if(modTime > rParams.mSyncPeriodEnd)
{
box_time_t now = GetCurrentBoxTime();
int age = BoxTimeToSeconds(now -
@@ -1060,7 +1044,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
bool fileSynced = true;
- if (doUpload)
+ if(doUpload)
{
// Upload needed, don't mark sync success until
// we've actually done it
@@ -1094,7 +1078,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
attributesHash,
noPreviousVersionOnServer);
- if (latestObjectID == 0)
+ if(latestObjectID == 0)
{
// storage limit exceeded
rParams.mrContext.SetStorageLimitExceeded();
@@ -1118,7 +1102,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
}
catch(BoxException &e)
{
- if (e.GetType() == BackupStoreException::ExceptionType &&
+ if(e.GetType() == BackupStoreException::ExceptionType &&
e.GetSubType() == BackupStoreException::SignalReceived)
{
// abort requested, pass the
@@ -1185,7 +1169,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
}
catch (BoxException &e)
{
- BOX_ERROR("Failed to read or store file attributes "
+ BOX_ERROR("Failed to read or store file attributes "
"for '" << nonVssFilePath << "', will try again "
"later");
}
@@ -1199,7 +1183,8 @@ bool BackupClientDirectoryRecord::UpdateItems(
{
mpPendingEntries = new std::map<std::string, box_time_t>;
}
- // Adding to mPendingEntries list
+
+ // Adding to mPendingEntries list
if(pendingFirstSeenTime == 0)
{
// Haven't seen this before -- add to list!
@@ -1236,7 +1221,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
if(currentIDMap.Lookup(inodeNum, objid, dirid))
{
// Found
- if (dirid != mObjectID)
+ if(dirid != mObjectID)
{
BOX_WARNING("Found conflicting parent ID for "
"file ID " << inodeNum << " (" <<
@@ -1260,11 +1245,10 @@ bool BackupClientDirectoryRecord::UpdateItems(
if(latestObjectID != 0)
{
- BOX_TRACE("Storing uploaded file ID " <<
- inodeNum << " (" << nonVssFilePath << ") "
- "in ID map as object " <<
- latestObjectID << " with parent " <<
- mObjectID);
+ BOX_TRACE("Storing uploaded file ID " << inodeNum << " (" <<
+ nonVssFilePath << ") in ID map as object " <<
+ BOX_FORMAT_OBJECTID(latestObjectID) << " with parent " <<
+ BOX_FORMAT_OBJECTID(mObjectID));
idMap.AddToMap(inodeNum, latestObjectID,
mObjectID /* containing directory */,
nonVssFilePath);
@@ -1272,7 +1256,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
}
- if (fileSynced)
+ if(fileSynced)
{
rNotifier.NotifyFileSynchronised(this, nonVssFilePath,
fileSize);
@@ -1403,7 +1387,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
doCreateDirectoryRecord = (subDirObjectID != 0);
}
- if (doCreateDirectoryRecord)
+ if(doCreateDirectoryRecord)
{
// New an object for this
psubDirRecord = new BackupClientDirectoryRecord(subDirObjectID, *d);
@@ -1496,7 +1480,7 @@ bool BackupClientDirectoryRecord::UpdateItems(
mSubDirectories.erase(e);
delete rec;
- BOX_TRACE("Deleted directory record for " <<
+ BOX_TRACE("Deleted directory record for " <<
nonVssLocalName);
}
}
@@ -1507,6 +1491,118 @@ bool BackupClientDirectoryRecord::UpdateItems(
return allUpdatedSuccessfully;
}
+// Returns NULL if not renamed, or the new BackupStoreDirectory::Entry in p_dir (containing the ID
+// of the renamed (moved) object) otherwise.
+BackupStoreDirectory::Entry* BackupClientDirectoryRecord::CheckForRename(
+ BackupClientContext& context, BackupStoreDirectory* p_dir,
+ const BackupStoreFilenameClear& remote_filename, InodeRefType inode_num,
+ const std::string& local_path_non_vss)
+{
+ // We now know...
+ // 1) File has just been added
+ // 2) It's not in the store
+
+ // Do we know about the inode number?
+ const BackupClientInodeToIDMap &idMap(context.GetCurrentIDMap());
+ int64_t prev_object_id = 0, prev_dir_id = 0;
+ if(!idMap.Lookup(inode_num, prev_object_id, prev_dir_id))
+ {
+ return NULL;
+ }
+
+ std::ostringstream msg_prefix_buf;
+ msg_prefix_buf << local_path_non_vss << ": have seen inode " << inode_num << " before, "
+ "with ID " << BOX_FORMAT_OBJECTID(prev_object_id) << " in directory " <<
+ BOX_FORMAT_OBJECTID(prev_dir_id);
+ std::string msg_prefix = msg_prefix_buf.str();
+
+ std::ostringstream msg_suffix_buf;
+ msg_suffix_buf << ", so will not move to directory " << BOX_FORMAT_OBJECTID(mObjectID);
+ std::string msg_suffix = msg_suffix_buf.str();
+
+ // We've seen this inode number before. Look up on the server to get the filename, to
+ // reconstruct the local filename that it had when it was backed up before (elsewhere):
+ std::string possible_prev_local_name;
+ bool was_a_dir = false;
+ bool was_current_version = false;
+ box_time_t remote_mod_time = 0, remote_attr_hash = 0;
+ BackupStoreFilenameClear prev_remote_name;
+ if(!context.FindFilename(prev_object_id, prev_dir_id, possible_prev_local_name, was_a_dir,
+ was_current_version, &remote_mod_time, &remote_attr_hash, &prev_remote_name))
+ {
+ BOX_TRACE(msg_prefix << ", but that no longer exists on the server, so cannot find "
+ "corresponding local file to check for rename");
+ return NULL;
+ }
+
+ // Only interested if it's a file and the latest version
+ if(was_a_dir || !was_current_version)
+ {
+ BOX_TRACE(msg_prefix << ", but that was " <<
+ (was_a_dir ? "a directory" : "not the latest version") <<
+ msg_suffix);
+ return NULL;
+ }
+
+ // Check that the object we found in the ID map doesn't exist on disc
+ EMU_STRUCT_STAT st;
+ if(EMU_STAT(possible_prev_local_name.c_str(), &st) == 0)
+ {
+ BOX_TRACE(msg_prefix << ", but that was for " << possible_prev_local_name << " "
+ "which still exists locally (most likely moved and replaced)" << msg_suffix);
+ return NULL;
+ }
+
+ if(errno != ENOENT)
+ {
+ BOX_TRACE(BOX_SYS_ERROR_MESSAGE(msg_prefix << ", but that was for " <<
+ possible_prev_local_name << " which we cannot access" << msg_suffix));
+ return NULL;
+ }
+
+ // Doesn't exist locally, but does exist on the server.
+ // Therefore we can safely rename it to this new file.
+
+ // Get the connection to the server
+ BackupProtocolCallable &connection(context.GetConnection());
+
+ // Only do this step if there is room on the server.
+ // This step will be repeated later when there is space available
+ if(context.StorageLimitExceeded())
+ {
+ BOX_TRACE(possible_prev_local_name << " appears to have been renamed to " <<
+ local_path_non_vss << ", but our account on the server is full, "
+ "so not moving object " <<
+ BOX_FORMAT_OBJECTID(prev_object_id) << " from directory " <<
+ BOX_FORMAT_OBJECTID(prev_dir_id) << " to " <<
+ BOX_FORMAT_OBJECTID(mObjectID));
+ return NULL;
+ }
+
+ // Rename the existing files (ie include old versions) on the server
+ connection.QueryMoveObject(prev_object_id, prev_dir_id,
+ mObjectID /* move to this directory */,
+ BackupProtocolMoveObject::Flags_MoveAllWithSameName |
+ BackupProtocolMoveObject::Flags_AllowMoveOverDeletedObject,
+ remote_filename);
+
+ // Stop the attempt to delete the file in the original location
+ BackupClientDeleteList &rdelList(context.GetDeleteList());
+ rdelList.StopFileDeletion(prev_dir_id, prev_remote_name);
+
+ BOX_TRACE(possible_prev_local_name << " appears to have been renamed to " <<
+ local_path_non_vss << ", so moving object " <<
+ BOX_FORMAT_OBJECTID(prev_object_id) << " from directory " <<
+ BOX_FORMAT_OBJECTID(prev_dir_id) << " to " <<
+ BOX_FORMAT_OBJECTID(mObjectID));
+
+ // Create new entry in the directory for it: will be near enough what's actually on the
+ // server for the rest to work.
+ return p_dir->AddEntry(remote_filename, remote_mod_time, prev_object_id,
+ 0 /* size in blocks unknown, but not needed */,
+ BackupStoreDirectory::Entry::Flags_File, remote_attr_hash);
+}
+
int64_t BackupClientDirectoryRecord::CreateRemoteDir(const std::string& localDirPath,
const std::string& nonVssDirPath, const std::string& remoteDirPath,
BackupStoreFilenameClear& storeFilename, bool* pHaveJustCreatedDirOnServer,
diff --git a/lib/bbackupd/BackupClientDirectoryRecord.h b/lib/bbackupd/BackupClientDirectoryRecord.h
index 865fc747..7308d7d1 100644
--- a/lib/bbackupd/BackupClientDirectoryRecord.h
+++ b/lib/bbackupd/BackupClientDirectoryRecord.h
@@ -163,6 +163,7 @@ private:
void UpdateAttributes(SyncParams &rParams,
BackupStoreDirectory *pDirOnStore,
const std::string &rLocalPath);
+
protected: // to allow tests to hook in before UpdateItems() runs
virtual bool UpdateItems(SyncParams &rParams,
const std::string &rLocalPath,
@@ -172,7 +173,11 @@ protected: // to allow tests to hook in before UpdateItems() runs
std::vector<BackupStoreDirectory::Entry *> &rEntriesLeftOver,
std::vector<std::string> &rFiles,
const std::vector<std::string> &rDirs);
+
private:
+ BackupStoreDirectory::Entry* CheckForRename(BackupClientContext& context,
+ BackupStoreDirectory* p_dir, const BackupStoreFilenameClear& remote_filename,
+ InodeRefType inode_num, const std::string& local_path_non_vss);
int64_t CreateRemoteDir(const std::string& localDirPath,
const std::string& nonVssDirPath,
const std::string& remoteDirPath,
diff --git a/lib/bbackupd/BackupDaemon.cpp b/lib/bbackupd/BackupDaemon.cpp
index d75aa381..f4dcf270 100644
--- a/lib/bbackupd/BackupDaemon.cpp
+++ b/lib/bbackupd/BackupDaemon.cpp
@@ -531,7 +531,7 @@ void BackupDaemon::Run()
mapCommandSocketInfo->mListeningSocket.Listen(
socketName);
#else
- ::unlink(socketName);
+ EMU_UNLINK(socketName);
mapCommandSocketInfo->mListeningSocket.Listen(
Socket::TypeUNIX, socketName);
#endif
@@ -2503,12 +2503,32 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
}
}
- const Configuration& rConfig(
- rLocationsConf.GetSubConfiguration(*pLocName));
+ const Configuration& rConfig(rLocationsConf.GetSubConfiguration(*pLocName));
std::auto_ptr<Location> apLoc;
+ BackupStoreFilenameClear dirname(*pLocName); // generate the filename
+ bool local_dir_exists = true;
+ int64_t existing_remote_dir_id = 0;
+
+ box_time_t attrModTime = 0;
+ BackupClientFileAttributes attr;
+
try
{
+ // Does this exist on the server? Remove from dir object early, so that if
+ // we fail to stat the local directory, we still don't consider to remote
+ // one for deletion.
+ BackupStoreDirectory::Iterator iter(dir);
+ BackupStoreDirectory::Entry *en = iter.FindMatchingClearName(dirname);
+ if(en != NULL)
+ {
+ existing_remote_dir_id = en->GetObjectID();
+
+ // Delete the entry from the directory, so we get a list of
+ // unused root directories at the end of this.
+ dir.DeleteEntry(existing_remote_dir_id);
+ }
+
if(pLoc == NULL)
{
// Create a record for it
@@ -2523,59 +2543,42 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
pLoc->mPath = rConfig.GetKeyValue("Path");
}
- // Read the exclude lists from the Configuration
- pLoc->mapExcludeFiles.reset(BackupClientMakeExcludeList_Files(rConfig));
- pLoc->mapExcludeDirs.reset(BackupClientMakeExcludeList_Dirs(rConfig));
+ // Get the directory's attributes and modification time. We need this to
+ // check whether the local directory exists, even if we don't have
+ // stat[v]fs(). Otherwise we must skip it.
+ attr.ReadAttributes(pLoc->mPath.c_str(),
+ true /* directories have zero mod times */,
+ 0 /* not interested in mod time */,
+ &attrModTime /* get the attribute modification time */);
- // Does this exist on the server?
- // Remove from dir object early, so that if we fail
- // to stat the local directory, we still don't
- // consider to remote one for deletion.
- BackupStoreDirectory::Iterator iter(dir);
- BackupStoreFilenameClear dirname(pLoc->mName); // generate the filename
- BackupStoreDirectory::Entry *en = iter.FindMatchingClearName(dirname);
- int64_t oid = 0;
- if(en != 0)
- {
- oid = en->GetObjectID();
-
- // Delete the entry from the directory, so we get a list of
- // unused root directories at the end of this.
- dir.DeleteEntry(oid);
- }
-
// Do a fsstat on the pathname to find out which mount it's on
{
#if defined HAVE_STRUCT_STATFS_F_MNTONNAME || defined HAVE_STRUCT_STATVFS_F_MNTONNAME || defined WIN32
// BSD style statfs -- includes mount point, which is nice.
-#ifdef HAVE_STRUCT_STATVFS_F_MNTONNAME
+# ifdef HAVE_STRUCT_STATVFS_F_MNTONNAME
struct statvfs s;
if(::statvfs(pLoc->mPath.c_str(), &s) != 0)
-#else // HAVE_STRUCT_STATVFS_F_MNTONNAME
+# else // HAVE_STRUCT_STATVFS_F_MNTONNAME
struct statfs s;
if(::statfs(pLoc->mPath.c_str(), &s) != 0)
-#endif // HAVE_STRUCT_STATVFS_F_MNTONNAME
+# endif // HAVE_STRUCT_STATVFS_F_MNTONNAME
{
- THROW_SYS_ERROR("Failed to stat path "
- "'" << pLoc->mPath << "' "
- "for location "
- "'" << pLoc->mName << "'",
+ THROW_SYS_FILE_ERROR(pLoc->mPath, "Failed to stat local path",
CommonException, OSFileError);
}
// Where the filesystem is mounted
std::string mountName(s.f_mntonname);
-#else // !HAVE_STRUCT_STATFS_F_MNTONNAME && !WIN32
+#else // !HAVE_STRUCT_STAT*FS_F_MNTONNAME && !WIN32
// Warn in logs if the directory isn't absolute
if(pLoc->mPath[0] != '/')
{
- BOX_WARNING("Location path '"
- << pLoc->mPath
- << "' is not absolute");
+ BOX_WARNING(BOX_FILE_MESSAGE(pLoc->mPath,
+ "location path is not absolute"));
}
// Go through the mount points found, and find a suitable one
std::string mountName("/");
@@ -2596,12 +2599,10 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
break;
}
}
- BOX_TRACE("mount point chosen for "
- << pLoc->mPath << " is "
- << mountName);
+ BOX_TRACE("mount point chosen for " << pLoc->mPath <<
+ " is " << mountName);
}
-
-#endif
+#endif // !HAVE_STRUCT_STAT*FS_F_MNTONNAME && !WIN32
// Got it?
std::map<std::string, int>::iterator f(mounts.find(mountName));
@@ -2623,89 +2624,68 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
++numIDMaps;
}
}
-
- // Does this exist on the server?
- if(en == 0)
- {
- // Doesn't exist, so it has to be created on the server. Let's go!
- // First, get the directory's attributes and modification time
- box_time_t attrModTime = 0;
- BackupClientFileAttributes attr;
- try
- {
- attr.ReadAttributes(pLoc->mPath.c_str(),
- true /* directories have zero mod times */,
- 0 /* not interested in mod time */,
- &attrModTime /* get the attribute modification time */);
- }
- catch (BoxException &e)
- {
- BOX_ERROR("Failed to get attributes "
- "for path '" << pLoc->mPath
- << "', skipping location '" <<
- pLoc->mName << "'");
- throw;
- }
-
- // Execute create directory command
- try
- {
- std::auto_ptr<IOStream> attrStream(
- new MemBlockStream(attr));
- std::auto_ptr<BackupProtocolSuccess>
- dirCreate(connection.QueryCreateDirectory(
- BACKUPSTORE_ROOT_DIRECTORY_ID, // containing directory
- attrModTime, dirname, attrStream));
-
- // Object ID for later creation
- oid = dirCreate->GetObjectID();
- }
- catch (BoxException &e)
- {
- BOX_ERROR("Failed to create remote "
- "directory '/" << pLoc->mName <<
- "', skipping location '" <<
- pLoc->mName << "'");
- throw;
- }
+ }
+ catch (std::exception &e)
+ {
+ BOX_ERROR("Failed to configure location '" << pLoc->mName << "': " <<
+ e.what());
+ local_dir_exists = false;
+ }
+ catch(...)
+ {
+ BOX_ERROR("Failed to configure location '" << pLoc->mName << "': please "
+ "check for previous errors");
+ local_dir_exists = false;
+ }
- }
+ // Remove it from the temporary list to avoid deletion, even if it doesn't actually
+ // exist locally at this time:
+ tmpLocations.remove(pLoc);
- // Create and store the directory object for the root of this location
- ASSERT(oid != 0);
- if(pLoc->mapDirectoryRecord.get() == NULL)
- {
- pLoc->mapDirectoryRecord.reset(
- new BackupClientDirectoryRecord(oid, *pLocName));
- }
-
- // Remove it from the temporary list to avoid deletion
- tmpLocations.remove(pLoc);
+ // Does this exist on the server?
+ if(!local_dir_exists)
+ {
+ mReadErrorsOnFilesystemObjects = true;
+ // Don't try to create it remotely, just skip it.
+ continue;
+ }
- // Push it back on the vector of locations
- mLocations.push_back(pLoc);
+ if(existing_remote_dir_id == 0)
+ {
+ // Doesn't exist, so it has to be created on the server. Let's go!
- if(apLoc.get() != NULL)
- {
- // Don't delete it now!
- apLoc.release();
- }
+ // Create the remote directory for this location. If this fails, then we
+ // abort the whole backup, otherwise the error isn't reported sufficiently
+ // severely for test_bbackupd_responds_to_connection_failure_in_process_s3.
+ std::auto_ptr<IOStream> attrStream(new MemBlockStream(attr));
+ std::auto_ptr<BackupProtocolSuccess> dirCreate(
+ connection.QueryCreateDirectory(
+ BACKUPSTORE_ROOT_DIRECTORY_ID, // containing directory
+ attrModTime, dirname, attrStream));
+
+ // Object ID for later creation
+ existing_remote_dir_id = dirCreate->GetObjectID();
}
- catch (std::exception &e)
+
+ // Create and store the directory object for the root of this location
+ ASSERT(existing_remote_dir_id != 0);
+ if(pLoc->mapDirectoryRecord.get() == NULL)
{
- BOX_ERROR("Failed to configure location '"
- << pLoc->mName << "' path '"
- << pLoc->mPath << "': " << e.what() <<
- ": please check for previous errors");
- mReadErrorsOnFilesystemObjects = true;
+ pLoc->mapDirectoryRecord.reset(
+ new BackupClientDirectoryRecord(existing_remote_dir_id, *pLocName));
}
- catch(...)
+
+ // Read the exclude lists from the Configuration
+ pLoc->mapExcludeFiles.reset(BackupClientMakeExcludeList_Files(rConfig));
+ pLoc->mapExcludeDirs.reset(BackupClientMakeExcludeList_Dirs(rConfig));
+
+ // Push it back on the vector of locations
+ mLocations.push_back(pLoc);
+
+ if(apLoc.get() != NULL)
{
- BOX_ERROR("Failed to configure location '"
- << pLoc->mName << "' path '"
- << pLoc->mPath << "': please check for "
- "previous errors");
- mReadErrorsOnFilesystemObjects = true;
+ // Don't delete it now!
+ apLoc.release();
}
}
@@ -2714,8 +2694,7 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
i = tmpLocations.begin();
i != tmpLocations.end(); i++)
{
- BOX_INFO("Removing obsolete location from memory: " <<
- (*i)->mName);
+ BOX_INFO("Removing obsolete location from memory: " << (*i)->mName);
delete *i;
}
@@ -2745,8 +2724,7 @@ void BackupDaemon::SetupLocations(BackupClientContext &rClientContext, const Con
SecondsToBoxTime(mDeleteRedundantLocationsAfter);
}
- int secs = BoxTimeToSeconds(mDeleteUnusedRootDirEntriesAfter
- - now);
+ int secs = BoxTimeToSeconds(mDeleteUnusedRootDirEntriesAfter - now);
BOX_NOTICE(dir.GetNumberOfEntries() << " redundant locations "
"in root directory found, will delete from store "
@@ -2829,7 +2807,7 @@ void BackupDaemon::FillIDMapVector(std::vector<BackupClientInodeToIDMap *> &rVec
BOX_NOTICE("Found an incomplete ID map "
"database, deleting it to start "
"afresh: " << filename);
- if(unlink(filename.c_str()) != 0)
+ if(EMU_UNLINK(filename.c_str()) != 0)
{
BOX_LOG_NATIVE_ERROR(BOX_FILE_MESSAGE(
filename, "Failed to delete "
@@ -2878,14 +2856,14 @@ void BackupDaemon::DeleteCorruptBerkelyDbFiles()
// Delete the file
BOX_TRACE("Deleting " << filename);
- ::unlink(filename.c_str());
+ EMU_UNLINK(filename.c_str());
// Add a suffix for the new map
filename += ".n";
// Delete that too
BOX_TRACE("Deleting " << filename);
- ::unlink(filename.c_str());
+ EMU_UNLINK(filename.c_str());
}
}
@@ -3636,7 +3614,7 @@ bool BackupDaemon::DeleteStoreObjectInfo() const
}
// Actually delete it
- if(::unlink(storeObjectInfoFile.c_str()) != 0)
+ if(EMU_UNLINK(storeObjectInfoFile.c_str()) != 0)
{
BOX_LOG_SYS_ERROR("Failed to delete the old "
"StoreObjectInfoFile: " << storeObjectInfoFile);