summary refs log tree commit diff
path: root/lib/backupstore/BackupStoreContext.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'lib/backupstore/BackupStoreContext.cpp')
-rw-r--r-- lib/backupstore/BackupStoreContext.cpp | 721
1 files changed, 429 insertions, 292 deletions
diff --git a/lib/backupstore/BackupStoreContext.cpp b/lib/backupstore/BackupStoreContext.cpp
index 2c98b1d7..1a782df4 100644
--- a/lib/backupstore/BackupStoreContext.cpp
+++ b/lib/backupstore/BackupStoreContext.cpp
@@ -30,13 +30,14 @@
#include "MemLeakFindOn.h"
-// Maximum number of directories to keep in the cache
-// When the cache is bigger than this, everything gets
-// deleted.
+// Maximum number of directories to keep in the cache When the cache is bigger
+// than this, everything gets deleted. In tests, we set the cache size to zero
+// to ensure that it's always flushed, which is very inefficient but helps to
+// catch programming errors (use of freed data).
#ifdef BOX_RELEASE_BUILD
#define MAX_CACHE_SIZE 32
#else
- #define MAX_CACHE_SIZE 2
+ #define MAX_CACHE_SIZE 0
#endif
// Allow the housekeeping process 4 seconds to release an account
@@ -54,19 +55,22 @@
//
// --------------------------------------------------------------------------
BackupStoreContext::BackupStoreContext(int32_t ClientID,
- HousekeepingInterface &rDaemon, const std::string& rConnectionDetails)
- : mConnectionDetails(rConnectionDetails),
- mClientID(ClientID),
- mrDaemon(rDaemon),
- mProtocolPhase(Phase_START),
- mClientHasAccount(false),
- mStoreDiscSet(-1),
- mReadOnly(true),
- mSaveStoreInfoDelay(STORE_INFO_SAVE_DELAY),
- mpTestHook(NULL)
+ HousekeepingInterface* pHousekeeping, const std::string& rConnectionDetails)
+: mConnectionDetails(rConnectionDetails),
+ mClientID(ClientID),
+ mpHousekeeping(pHousekeeping),
+ mProtocolPhase(Phase_START),
+ mClientHasAccount(false),
+ mStoreDiscSet(-1),
+ mReadOnly(true),
+ mSaveStoreInfoDelay(STORE_INFO_SAVE_DELAY),
+ mpTestHook(NULL)
+// If you change the initialisers, be sure to update
+// BackupStoreContext::ReceivedFinishCommand as well!
{
}
+
// --------------------------------------------------------------------------
//
// Function
@@ -77,11 +81,19 @@ BackupStoreContext::BackupStoreContext(int32_t ClientID,
// --------------------------------------------------------------------------
BackupStoreContext::~BackupStoreContext()
{
+ ClearDirectoryCache();
+}
+
+
+void BackupStoreContext::ClearDirectoryCache()
+{
// Delete the objects in the cache
- for(std::map<int64_t, BackupStoreDirectory*>::iterator i(mDirectoryCache.begin()); i != mDirectoryCache.end(); ++i)
+ for(std::map<int64_t, BackupStoreDirectory*>::iterator i(mDirectoryCache.begin());
+ i != mDirectoryCache.end(); ++i)
{
delete (i->second);
}
+ mDirectoryCache.clear();
}
@@ -103,6 +115,7 @@ void BackupStoreContext::CleanUp()
}
}
+
// --------------------------------------------------------------------------
//
// Function
@@ -118,6 +131,20 @@ void BackupStoreContext::ReceivedFinishCommand()
// Save the store info, not delayed
SaveStoreInfo(false);
}
+
+ // Just in case someone wants to reuse a local protocol object,
+ // put the context back to its initial state.
+ mProtocolPhase = BackupStoreContext::Phase_Version;
+
+ // Avoid the need to check version again, by not resetting
+ // mClientHasAccount, mAccountRootDir or mStoreDiscSet
+
+ mReadOnly = true;
+ mSaveStoreInfoDelay = STORE_INFO_SAVE_DELAY;
+ mpTestHook = NULL;
+ mapStoreInfo.reset();
+ mapRefCount.reset();
+ ClearDirectoryCache();
}
@@ -133,19 +160,19 @@ bool BackupStoreContext::AttemptToGetWriteLock()
{
// Make the filename of the write lock file
std::string writeLockFile;
- StoreStructure::MakeWriteLockFilename(mStoreRoot, mStoreDiscSet, writeLockFile);
+ StoreStructure::MakeWriteLockFilename(mAccountRootDir, mStoreDiscSet, writeLockFile);
// Request the lock
bool gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
-
- if(!gotLock)
+
+ if(!gotLock && mpHousekeeping)
{
// The housekeeping process might have the thing open -- ask it to stop
char msg[256];
- int msgLen = sprintf(msg, "r%x\n", mClientID);
+ int msgLen = snprintf(msg, sizeof(msg), "r%x\n", mClientID);
// Send message
- mrDaemon.SendMessageToHousekeepingProcess(msg, msgLen);
-
+ mpHousekeeping->SendMessageToHousekeepingProcess(msg, msgLen);
+
// Then try again a few times
int tries = MAX_WAIT_FOR_HOUSEKEEPING_TO_RELEASE_ACCOUNT;
do
@@ -153,16 +180,16 @@ bool BackupStoreContext::AttemptToGetWriteLock()
::sleep(1 /* second */);
--tries;
gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
-
+
} while(!gotLock && tries > 0);
}
-
+
if(gotLock)
{
// Got the lock, mark as not read only
mReadOnly = false;
}
-
+
return gotLock;
}
@@ -181,16 +208,16 @@ void BackupStoreContext::LoadStoreInfo()
{
THROW_EXCEPTION(BackupStoreException, StoreInfoAlreadyLoaded)
}
-
+
// Load it up!
- std::auto_ptr<BackupStoreInfo> i(BackupStoreInfo::Load(mClientID, mStoreRoot, mStoreDiscSet, mReadOnly));
-
+ std::auto_ptr<BackupStoreInfo> i(BackupStoreInfo::Load(mClientID, mAccountRootDir, mStoreDiscSet, mReadOnly));
+
// Check it
if(i->GetAccountID() != mClientID)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoForWrongAccount)
}
-
+
// Keep the pointer to it
mapStoreInfo = i;
@@ -203,12 +230,11 @@ void BackupStoreContext::LoadStoreInfo()
}
catch(BoxException &e)
{
- BOX_WARNING("Reference count database is missing or corrupted, "
- "creating a new one, expect housekeeping to find and "
- "fix problems with reference counts later.");
-
- BackupStoreRefCountDatabase::CreateForRegeneration(account);
- mapRefCount = BackupStoreRefCountDatabase::Load(account, false);
+ THROW_EXCEPTION_MESSAGE(BackupStoreException,
+ CorruptReferenceCountDatabase, "Reference count "
+ "database is missing or corrupted, cannot safely open "
+ "account. Housekeeping will fix this automatically "
+ "when it next runs.");
}
}
@@ -227,6 +253,7 @@ void BackupStoreContext::SaveStoreInfo(bool AllowDelay)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
+
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
@@ -242,7 +269,7 @@ void BackupStoreContext::SaveStoreInfo(bool AllowDelay)
}
}
- // Want to save now
+ // Want to save now
mapStoreInfo->Save();
// Set count for next delay
@@ -263,83 +290,111 @@ void BackupStoreContext::SaveStoreInfo(bool AllowDelay)
void BackupStoreContext::MakeObjectFilename(int64_t ObjectID, std::string &rOutput, bool EnsureDirectoryExists)
{
// Delegate to utility function
- StoreStructure::MakeObjectFilename(ObjectID, mStoreRoot, mStoreDiscSet, rOutput, EnsureDirectoryExists);
+ StoreStructure::MakeObjectFilename(ObjectID, mAccountRootDir, mStoreDiscSet, rOutput, EnsureDirectoryExists);
}
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupStoreContext::GetDirectoryInternal(int64_t)
-// Purpose: Return a reference to a directory. Valid only until the
-// next time a function which affects directories is called.
-// Mainly this funciton, and creation of files.
-// Private version of this, which returns non-const directories.
+// Name: BackupStoreContext::GetDirectoryInternal(int64_t,
+// bool)
+// Purpose: Return a reference to a directory. Valid only until
+// the next time a function which affects directories
+// is called. Mainly this function, and creation of
+// files. Private version of this, which returns
+// non-const directories. Unless called with
+// AllowFlushCache == false, the cache may be flushed,
+// invalidating all directory references that you may
+// be holding, so beware.
// Created: 2003/09/02
//
// --------------------------------------------------------------------------
-BackupStoreDirectory &BackupStoreContext::GetDirectoryInternal(int64_t ObjectID)
+BackupStoreDirectory &BackupStoreContext::GetDirectoryInternal(int64_t ObjectID,
+ bool AllowFlushCache)
{
// Get the filename
std::string filename;
MakeObjectFilename(ObjectID, filename);
-
+ int64_t oldRevID = 0, newRevID = 0;
+
// Already in cache?
std::map<int64_t, BackupStoreDirectory*>::iterator item(mDirectoryCache.find(ObjectID));
- if(item != mDirectoryCache.end())
- {
- // Check the revision ID of the file -- does it need refreshing?
- int64_t revID = 0;
- if(!RaidFileRead::FileExists(mStoreDiscSet, filename, &revID))
- {
- THROW_EXCEPTION(BackupStoreException, DirectoryHasBeenDeleted)
- }
-
- if(revID == item->second->GetRevisionID())
+ if(item != mDirectoryCache.end()) {
+#ifndef BOX_RELEASE_BUILD // it might be in the cache, but invalidated
+ // in which case, delete it instead of returning it.
+ if(!item->second->IsInvalidated())
+#else
+ if(true)
+#endif
{
- // Looks good... return the cached object
- BOX_TRACE("Returning object " <<
- BOX_FORMAT_OBJECTID(ObjectID) <<
- " from cache, modtime = " << revID);
- return *(item->second);
+ oldRevID = item->second->GetRevisionID();
+
+ // Check the revision ID of the file -- does it need refreshing?
+ if(!RaidFileRead::FileExists(mStoreDiscSet, filename, &newRevID))
+ {
+ THROW_EXCEPTION(BackupStoreException, DirectoryHasBeenDeleted)
+ }
+
+ if(newRevID == oldRevID)
+ {
+ // Looks good... return the cached object
+ BOX_TRACE("Returning object " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ " from cache, modtime = " << newRevID)
+ return *(item->second);
+ }
}
-
- BOX_TRACE("Refreshing object " <<
- BOX_FORMAT_OBJECTID(ObjectID) <<
- " in cache, modtime changed from " <<
- item->second->GetRevisionID() << " to " << revID);
// Delete this cached object
delete item->second;
mDirectoryCache.erase(item);
}
-
+
// Need to load it up
-
+
// First check to see if the cache is too big
- if(mDirectoryCache.size() > MAX_CACHE_SIZE)
+ if(mDirectoryCache.size() > MAX_CACHE_SIZE && AllowFlushCache)
{
- // Very simple. Just delete everything!
- for(std::map<int64_t, BackupStoreDirectory*>::iterator i(mDirectoryCache.begin()); i != mDirectoryCache.end(); ++i)
+ // Very simple. Just delete everything! But in debug builds,
+ // leave the entries in the cache and invalidate them instead,
+ // so that any attempt to access them will cause an assertion
+ // failure that helps to track down the error.
+#ifdef BOX_RELEASE_BUILD
+ ClearDirectoryCache();
+#else
+ for(std::map<int64_t, BackupStoreDirectory*>::iterator
+ i = mDirectoryCache.begin();
+ i != mDirectoryCache.end(); i++)
{
- delete (i->second);
+ i->second->Invalidate();
}
- mDirectoryCache.clear();
+#endif
}
// Get a RaidFileRead to read it
- int64_t revID = 0;
- std::auto_ptr<RaidFileRead> objectFile(RaidFileRead::Open(mStoreDiscSet, filename, &revID));
- ASSERT(revID != 0);
-
- // New directory object
- std::auto_ptr<BackupStoreDirectory> dir(new BackupStoreDirectory);
-
+ std::auto_ptr<RaidFileRead> objectFile(RaidFileRead::Open(mStoreDiscSet,
+ filename, &newRevID));
+
+ ASSERT(newRevID != 0);
+
+ if (oldRevID == 0)
+ {
+ BOX_TRACE("Loading object " << BOX_FORMAT_OBJECTID(ObjectID) <<
+ " with modtime " << newRevID);
+ }
+ else
+ {
+ BOX_TRACE("Refreshing object " << BOX_FORMAT_OBJECTID(ObjectID) <<
+ " in cache, modtime changed from " << oldRevID <<
+ " to " << newRevID);
+ }
+
// Read it from the stream, then set it's revision ID
BufferedStream buf(*objectFile);
- dir->ReadFromStream(buf, IOStream::TimeOutInfinite);
- dir->SetRevisionID(revID);
-
+ std::auto_ptr<BackupStoreDirectory> dir(new BackupStoreDirectory(buf));
+ dir->SetRevisionID(newRevID);
+
// Make sure the size of the directory is available for writing the dir back
int64_t dirSize = objectFile->GetDiscUsageInBlocks();
ASSERT(dirSize > 0);
@@ -348,7 +403,7 @@ BackupStoreDirectory &BackupStoreContext::GetDirectoryInternal(int64_t ObjectID)
// Store in cache
BackupStoreDirectory *pdir = dir.release();
try
- {
+ {
mDirectoryCache[ObjectID] = pdir;
}
catch(...)
@@ -356,11 +411,12 @@ BackupStoreDirectory &BackupStoreContext::GetDirectoryInternal(int64_t ObjectID)
delete pdir;
throw;
}
-
+
// Return it
return *pdir;
}
+
// --------------------------------------------------------------------------
//
// Function
@@ -381,12 +437,12 @@ int64_t BackupStoreContext::AllocateObjectID()
// to try for finding an unused ID.
// (Sizes used in the store info are fixed by the housekeeping process)
int retryLimit = (STORE_INFO_SAVE_DELAY * 2);
-
+
while(retryLimit > 0)
{
// Attempt to allocate an ID from the store
int64_t id = mapStoreInfo->AllocateObjectID();
-
+
// Generate filename
std::string filename;
MakeObjectFilename(id, filename);
@@ -396,17 +452,17 @@ int64_t BackupStoreContext::AllocateObjectID()
// Success!
return id;
}
-
+
// Decrement retry count, and try again
--retryLimit;
-
+
// Mark that the store info should be saved as soon as possible
mSaveStoreInfoDelay = 0;
-
+
BOX_WARNING("When allocating object ID, found that " <<
BOX_FORMAT_OBJECTID(id) << " is already in use");
}
-
+
THROW_EXCEPTION(BackupStoreException, CouldNotFindUnusedIDDuringAllocation)
}
@@ -431,11 +487,12 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
+
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
}
-
+
// This is going to be a bit complex to make sure it copes OK
// with things going wrong.
// The only thing which isn't safe is incrementing the object ID
@@ -444,13 +501,13 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// be corrected the next time the account has a housekeeping run,
// and the object ID allocation code is tolerant of missed IDs.
// (the info is written lazily, so these are necessary)
-
+
// Get the directory we want to modify
BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
-
+
// Allocate the next ID
int64_t id = AllocateObjectID();
-
+
// Stream the file to disc
std::string fn;
MakeObjectFilename(id, fn, true /* make sure the directory it's in exists */);
@@ -458,12 +515,13 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
RaidFileWrite *ppreviousVerStoreFile = 0;
bool reversedDiffIsCompletelyDifferent = false;
int64_t oldVersionNewBlocksUsed = 0;
+ BackupStoreInfo::Adjustment adjustment = {};
+
try
{
RaidFileWrite storeFile(mStoreDiscSet, fn);
storeFile.Open(false /* no overwriting */);
- // size adjustment from use of patch in old file
int64_t spaceSavedByConversionToPatch = 0;
// Diff or full file?
@@ -482,12 +540,12 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
{
THROW_EXCEPTION(BackupStoreException, DiffFromIDNotFoundInDirectory)
}
-
+
// Diff file, needs to be recreated.
// Choose a temporary filename.
std::string tempFn(RaidFileController::DiscSetPathToFileSystemPath(mStoreDiscSet, fn + ".difftemp",
1 /* NOT the same disc as the write file, to avoid using lots of space on the same disc unnecessarily */));
-
+
try
{
// Open it twice
@@ -506,13 +564,13 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
THROW_EXCEPTION(CommonException, OSFileError);
}
#endif
-
+
// Stream the incoming diff to this temporary file
if(!rFile.CopyStreamTo(diff, BACKUP_STORE_TIMEOUT))
{
THROW_EXCEPTION(BackupStoreException, ReadFileFromStreamTimedOut)
}
-
+
// Verify the diff
diff.Seek(0, IOStream::SeekType_Absolute);
if(!BackupStoreFile::VerifyEncodedFileFormat(diff))
@@ -526,7 +584,7 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// Filename of the old version
std::string oldVersionFilename;
MakeObjectFilename(DiffFromFileID, oldVersionFilename, false /* no need to make sure the directory it's in exists */);
-
+
// Reassemble that diff -- open previous file, and combine the patch and file
std::auto_ptr<RaidFileRead> from(RaidFileRead::Open(mStoreDiscSet, oldVersionFilename));
BackupStoreFile::CombineFile(diff, diff2, *from, storeFile);
@@ -539,15 +597,26 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
diff.Seek(0, IOStream::SeekType_Absolute);
BackupStoreFile::ReverseDiffFile(diff, *from, *from2, *ppreviousVerStoreFile,
DiffFromFileID, &reversedDiffIsCompletelyDifferent);
-
+
// Store disc space used
oldVersionNewBlocksUsed = ppreviousVerStoreFile->GetDiscUsageInBlocks();
-
+
// And make a space adjustment for the size calculation
spaceSavedByConversionToPatch =
from->GetDiscUsageInBlocks() -
oldVersionNewBlocksUsed;
+ adjustment.mBlocksUsed -= spaceSavedByConversionToPatch;
+ // The code below will change the patch from a
+ // Current file to an Old file, so we need to
+ // account for it as a Current file here.
+ adjustment.mBlocksInCurrentFiles -=
+ spaceSavedByConversionToPatch;
+
+ // Don't adjust anything else here. We'll do it
+ // when we update the directory just below,
+ // which also accounts for non-diff replacements.
+
// Everything cleans up here...
}
catch(...)
@@ -557,14 +626,17 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
throw;
}
}
-
+
// Get the blocks used
newObjectBlocksUsed = storeFile.GetDiscUsageInBlocks();
-
+ adjustment.mBlocksUsed += newObjectBlocksUsed;
+ adjustment.mBlocksInCurrentFiles += newObjectBlocksUsed;
+ adjustment.mNumCurrentFiles++;
+
// Exceeds the hard limit?
- int64_t newBlocksUsed = mapStoreInfo->GetBlocksUsed() +
- newObjectBlocksUsed - spaceSavedByConversionToPatch;
- if(newBlocksUsed > mapStoreInfo->GetBlocksHardLimit())
+ int64_t newTotalBlocksUsed = mapStoreInfo->GetBlocksUsed() +
+ adjustment.mBlocksUsed;
+ if(newTotalBlocksUsed > mapStoreInfo->GetBlocksHardLimit())
{
THROW_EXCEPTION(BackupStoreException, AddedFileExceedsStorageLimit)
// The store file will be deleted automatically by the RaidFile object
@@ -581,7 +653,7 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
delete ppreviousVerStoreFile;
ppreviousVerStoreFile = 0;
}
-
+
throw;
}
@@ -596,21 +668,34 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// Error! Delete the file
RaidFileWrite del(mStoreDiscSet, fn);
del.Delete();
-
+
// Exception
THROW_EXCEPTION(BackupStoreException, AddedFileDoesNotVerify)
}
- }
-
+ }
+
// Modify the directory -- first make all files with the same name
// marked as an old version
- int64_t blocksInOldFiles = 0;
try
{
+ // Adjust the entry for the object that we replaced with a
+ // patch, above.
+ BackupStoreDirectory::Entry *poldEntry = NULL;
+
+ if(DiffFromFileID != 0)
+ {
+ // Get old version entry
+ poldEntry = dir.FindEntryByID(DiffFromFileID);
+ ASSERT(poldEntry != 0);
+
+ // Adjust size of old entry
+ int64_t oldSize = poldEntry->GetSizeInBlocks();
+ poldEntry->SetSizeInBlocks(oldVersionNewBlocksUsed);
+ }
+
if(MarkFileWithSameNameAsOldVersions)
{
BackupStoreDirectory::Iterator i(dir);
-
BackupStoreDirectory::Entry *e = 0;
while((e = i.Next()) != 0)
{
@@ -626,43 +711,30 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
e->AddFlags(BackupStoreDirectory::Entry::Flags_OldVersion);
// Can safely do this, because we know we won't be here if it's already
// an old version
- blocksInOldFiles += e->GetSizeInBlocks();
+ adjustment.mBlocksInOldFiles += e->GetSizeInBlocks();
+ adjustment.mBlocksInCurrentFiles -= e->GetSizeInBlocks();
+ adjustment.mNumOldFiles++;
+ adjustment.mNumCurrentFiles--;
}
}
}
}
-
+
// Then the new entry
BackupStoreDirectory::Entry *pnewEntry = dir.AddEntry(rFilename,
- ModificationTime, id, newObjectBlocksUsed,
- BackupStoreDirectory::Entry::Flags_File,
- AttributesHash);
+ ModificationTime, id, newObjectBlocksUsed,
+ BackupStoreDirectory::Entry::Flags_File,
+ AttributesHash);
- // Adjust for the patch back stuff?
- if(DiffFromFileID != 0)
+ // Adjust dependency info of file?
+ if(DiffFromFileID && poldEntry && !reversedDiffIsCompletelyDifferent)
{
- // Get old version entry
- BackupStoreDirectory::Entry *poldEntry = dir.FindEntryByID(DiffFromFileID);
- ASSERT(poldEntry != 0);
-
- // Adjust dependency info of file?
- if(!reversedDiffIsCompletelyDifferent)
- {
- poldEntry->SetDependsNewer(id);
- pnewEntry->SetDependsOlder(DiffFromFileID);
- }
-
- // Adjust size of old entry
- int64_t oldSize = poldEntry->GetSizeInBlocks();
- poldEntry->SetSizeInBlocks(oldVersionNewBlocksUsed);
-
- // And adjust blocks used count, for later adjustment
- newObjectBlocksUsed += (oldVersionNewBlocksUsed - oldSize);
- blocksInOldFiles += (oldVersionNewBlocksUsed - oldSize);
+ poldEntry->SetDependsNewer(id);
+ pnewEntry->SetDependsOlder(DiffFromFileID);
}
// Write the directory back to disc
- SaveDirectory(dir, InDirectory);
+ SaveDirectory(dir);
// Commit the old version's new patched version, now that the directory safely reflects
// the state of the files on disc.
@@ -678,47 +750,42 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// Back out on adding that file
RaidFileWrite del(mStoreDiscSet, fn);
del.Delete();
-
+
// Remove this entry from the cache
RemoveDirectoryFromCache(InDirectory);
-
+
// Delete any previous version store file
if(ppreviousVerStoreFile != 0)
{
delete ppreviousVerStoreFile;
ppreviousVerStoreFile = 0;
}
-
+
// Don't worry about the incremented number in the store info
throw;
}
-
+
// Check logic
ASSERT(ppreviousVerStoreFile == 0);
-
+
// Modify the store info
+ mapStoreInfo->AdjustNumCurrentFiles(adjustment.mNumCurrentFiles);
+ mapStoreInfo->AdjustNumOldFiles(adjustment.mNumOldFiles);
+ mapStoreInfo->AdjustNumDeletedFiles(adjustment.mNumDeletedFiles);
+ mapStoreInfo->AdjustNumDirectories(adjustment.mNumDirectories);
+ mapStoreInfo->ChangeBlocksUsed(adjustment.mBlocksUsed);
+ mapStoreInfo->ChangeBlocksInCurrentFiles(adjustment.mBlocksInCurrentFiles);
+ mapStoreInfo->ChangeBlocksInOldFiles(adjustment.mBlocksInOldFiles);
+ mapStoreInfo->ChangeBlocksInDeletedFiles(adjustment.mBlocksInDeletedFiles);
+ mapStoreInfo->ChangeBlocksInDirectories(adjustment.mBlocksInDirectories);
- if(DiffFromFileID == 0)
- {
- mapStoreInfo->AdjustNumFiles(1);
- }
- else
- {
- mapStoreInfo->AdjustNumOldFiles(1);
- }
-
- mapStoreInfo->ChangeBlocksUsed(newObjectBlocksUsed);
- mapStoreInfo->ChangeBlocksInCurrentFiles(newObjectBlocksUsed -
- blocksInOldFiles);
- mapStoreInfo->ChangeBlocksInOldFiles(blocksInOldFiles);
-
// Increment reference count on the new directory to one
mapRefCount->AddReference(id);
-
+
// Save the store info -- can cope if this exceptions because infomation
// will be rebuilt by housekeeping, and ID allocation can recover.
SaveStoreInfo(false);
-
+
// Return the ID to the caller
return id;
}
@@ -728,8 +795,11 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupStoreContext::DeleteFile(const BackupStoreFilename &, int64_t, int64_t &)
-// Purpose: Deletes a file, returning true if the file existed. Object ID returned too, set to zero if not found.
+// Name: BackupStoreContext::DeleteFile(
+// const BackupStoreFilename &, int64_t, int64_t &)
+// Purpose: Deletes a file by name, returning true if the file
+// existed. Object ID returned too, set to zero if not
+// found.
// Created: 2003/10/21
//
// --------------------------------------------------------------------------
@@ -754,9 +824,6 @@ bool BackupStoreContext::DeleteFile(const BackupStoreFilename &rFilename, int64_
bool madeChanges = false;
rObjectIDOut = 0; // not found
- // Count of deleted blocks
- int64_t blocksDel = 0;
-
try
{
// Iterate through directory, only looking at files which haven't been deleted
@@ -769,14 +836,28 @@ bool BackupStoreContext::DeleteFile(const BackupStoreFilename &rFilename, int64_
if(e->GetName() == rFilename)
{
// Check that it's definately not already deleted
- ASSERT((e->GetFlags() & BackupStoreDirectory::Entry::Flags_Deleted) == 0);
+ ASSERT(!e->IsDeleted());
// Set deleted flag
e->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
// Mark as made a change
madeChanges = true;
- // Can safely do this, because we know we won't be here if it's already
- // an old version
- blocksDel += e->GetSizeInBlocks();
+
+ int64_t blocks = e->GetSizeInBlocks();
+ mapStoreInfo->AdjustNumDeletedFiles(1);
+ mapStoreInfo->ChangeBlocksInDeletedFiles(blocks);
+
+ // We're marking all old versions as deleted.
+ // This is how a file can be old and deleted
+ // at the same time. So we don't subtract from
+ // number or size of old files. But if it was
+ // a current file, then it's not any more, so
+ // we do need to adjust the current counts.
+ if(!e->IsOld())
+ {
+ mapStoreInfo->AdjustNumCurrentFiles(-1);
+ mapStoreInfo->ChangeBlocksInCurrentFiles(-blocks);
+ }
+
// Is this the last version?
if((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0)
{
@@ -786,19 +867,12 @@ bool BackupStoreContext::DeleteFile(const BackupStoreFilename &rFilename, int64_
}
}
}
-
+
// Save changes?
if(madeChanges)
{
// Save the directory back
- SaveDirectory(dir, InDirectory);
-
- // Modify the store info, and write
- // It definitely wasn't an old or deleted version
- mapStoreInfo->AdjustNumFiles(-1);
- mapStoreInfo->AdjustNumDeletedFiles(1);
- mapStoreInfo->ChangeBlocksInDeletedFiles(blocksDel);
-
+ SaveDirectory(dir);
SaveStoreInfo(false);
}
}
@@ -871,16 +945,16 @@ bool BackupStoreContext::UndeleteFile(int64_t ObjectID, int64_t InDirectory)
}
}
}
-
+
// Save changes?
if(madeChanges)
{
// Save the directory back
- SaveDirectory(dir, InDirectory);
-
+ SaveDirectory(dir);
+
// Modify the store info, and write
mapStoreInfo->ChangeBlocksInDeletedFiles(blocksDel);
-
+
// Maybe postponed save of store info
SaveStoreInfo();
}
@@ -919,27 +993,27 @@ void BackupStoreContext::RemoveDirectoryFromCache(int64_t ObjectID)
// --------------------------------------------------------------------------
//
// Function
-// Name: BackupStoreContext::SaveDirectory(BackupStoreDirectory &, int64_t)
+// Name: BackupStoreContext::SaveDirectory(BackupStoreDirectory &)
// Purpose: Save directory back to disc, update time in cache
// Created: 2003/09/04
//
// --------------------------------------------------------------------------
-void BackupStoreContext::SaveDirectory(BackupStoreDirectory &rDir, int64_t ObjectID)
+void BackupStoreContext::SaveDirectory(BackupStoreDirectory &rDir)
{
if(mapStoreInfo.get() == 0)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
- if(rDir.GetObjectID() != ObjectID)
- {
- THROW_EXCEPTION(BackupStoreException, Internal)
- }
+
+ int64_t ObjectID = rDir.GetObjectID();
try
{
// Write to disc, adjust size in store info
std::string dirfn;
MakeObjectFilename(ObjectID, dirfn);
+ int64_t old_dir_size = rDir.GetUserInfo1_SizeInBlocks();
+
{
RaidFileWrite writeDir(mStoreDiscSet, dirfn);
writeDir.Open(true /* allow overwriting */);
@@ -953,7 +1027,7 @@ void BackupStoreContext::SaveDirectory(BackupStoreDirectory &rDir, int64_t Objec
// Commit directory
writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
-
+
// Make sure the size of the directory is available for writing the dir back
ASSERT(dirSize > 0);
int64_t sizeAdjustment = dirSize - rDir.GetUserInfo1_SizeInBlocks();
@@ -962,6 +1036,7 @@ void BackupStoreContext::SaveDirectory(BackupStoreDirectory &rDir, int64_t Objec
// Update size stored in directory
rDir.SetUserInfo1_SizeInBlocks(dirSize);
}
+
// Refresh revision ID in cache
{
int64_t revid = 0;
@@ -969,8 +1044,41 @@ void BackupStoreContext::SaveDirectory(BackupStoreDirectory &rDir, int64_t Objec
{
THROW_EXCEPTION(BackupStoreException, Internal)
}
+
+ BOX_TRACE("Saved directory " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ ", modtime = " << revid);
+
rDir.SetRevisionID(revid);
}
+
+ // Update the directory entry in the grandparent, to ensure
+ // that it reflects the current size of the parent directory.
+ int64_t new_dir_size = rDir.GetUserInfo1_SizeInBlocks();
+ if(new_dir_size != old_dir_size &&
+ ObjectID != BACKUPSTORE_ROOT_DIRECTORY_ID)
+ {
+ int64_t ContainerID = rDir.GetContainerID();
+ BackupStoreDirectory& parent(
+ GetDirectoryInternal(ContainerID));
+ // rDir is now invalid
+ BackupStoreDirectory::Entry* en =
+ parent.FindEntryByID(ObjectID);
+ if(!en)
+ {
+ BOX_ERROR("Missing entry for directory " <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
+ " in directory " <<
+ BOX_FORMAT_OBJECTID(ContainerID) <<
+ " while trying to update dir size in parent");
+ }
+ else
+ {
+ ASSERT(en->GetSizeInBlocks() == old_dir_size);
+ en->SetSizeInBlocks(new_dir_size);
+ SaveDirectory(parent);
+ }
+ }
}
catch(...)
{
@@ -991,20 +1099,26 @@ void BackupStoreContext::SaveDirectory(BackupStoreDirectory &rDir, int64_t Objec
// Created: 2003/09/04
//
// --------------------------------------------------------------------------
-int64_t BackupStoreContext::AddDirectory(int64_t InDirectory, const BackupStoreFilename &rFilename, const StreamableMemBlock &Attributes, int64_t AttributesModTime, bool &rAlreadyExists)
+int64_t BackupStoreContext::AddDirectory(int64_t InDirectory,
+ const BackupStoreFilename &rFilename,
+ const StreamableMemBlock &Attributes,
+ int64_t AttributesModTime,
+ int64_t ModificationTime,
+ bool &rAlreadyExists)
{
if(mapStoreInfo.get() == 0)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
+
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
}
-
+
// Flags as not already existing
rAlreadyExists = false;
-
+
// Get the directory we want to modify
BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
@@ -1030,19 +1144,31 @@ int64_t BackupStoreContext::AddDirectory(int64_t InDirectory, const BackupStoreF
// Create an empty directory with the given attributes on disc
std::string fn;
MakeObjectFilename(id, fn, true /* make sure the directory it's in exists */);
+ int64_t dirSize;
+
{
BackupStoreDirectory emptyDir(id, InDirectory);
// add the atttribues
emptyDir.SetAttributes(Attributes, AttributesModTime);
-
+
// Write...
RaidFileWrite dirFile(mStoreDiscSet, fn);
dirFile.Open(false /* no overwriting */);
emptyDir.WriteToStream(dirFile);
// Get disc usage, before it's commited
- int64_t dirSize = dirFile.GetDiscUsageInBlocks();
+ dirSize = dirFile.GetDiscUsageInBlocks();
+
+ // Exceeds the hard limit?
+ int64_t newTotalBlocksUsed = mapStoreInfo->GetBlocksUsed() +
+ dirSize;
+ if(newTotalBlocksUsed > mapStoreInfo->GetBlocksHardLimit())
+ {
+ THROW_EXCEPTION(BackupStoreException, AddedFileExceedsStorageLimit)
+ // The file will be deleted automatically by the RaidFile object
+ }
+
// Commit the file
- dirFile.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
+ dirFile.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
// Make sure the size of the directory is added to the usage counts in the info
ASSERT(dirSize > 0);
@@ -1050,12 +1176,14 @@ int64_t BackupStoreContext::AddDirectory(int64_t InDirectory, const BackupStoreF
mapStoreInfo->ChangeBlocksInDirectories(dirSize);
// Not added to cache, so don't set the size in the directory
}
-
+
// Then add it into the parent directory
try
{
- dir.AddEntry(rFilename, 0 /* modification time */, id, 0 /* blocks used */, BackupStoreDirectory::Entry::Flags_Dir, 0 /* attributes mod time */);
- SaveDirectory(dir, InDirectory);
+ dir.AddEntry(rFilename, ModificationTime, id, dirSize,
+ BackupStoreDirectory::Entry::Flags_Dir,
+ 0 /* attributes hash */);
+ SaveDirectory(dir);
// Increment reference count on the new directory to one
mapRefCount->AddReference(id);
@@ -1065,12 +1193,12 @@ int64_t BackupStoreContext::AddDirectory(int64_t InDirectory, const BackupStoreF
// Back out on adding that directory
RaidFileWrite del(mStoreDiscSet, fn);
del.Delete();
-
+
// Remove this entry from the cache
RemoveDirectoryFromCache(InDirectory);
-
+
// Don't worry about the incremented number in the store info
- throw;
+ throw;
}
// Save the store info (may not be postponed)
@@ -1081,6 +1209,7 @@ int64_t BackupStoreContext::AddDirectory(int64_t InDirectory, const BackupStoreF
return id;
}
+
// --------------------------------------------------------------------------
//
// Function
@@ -1096,6 +1225,7 @@ void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
+
if(mReadOnly)
{
THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
@@ -1103,9 +1233,6 @@ void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
// Containing directory
int64_t InDirectory = 0;
-
- // Count of blocks deleted
- int64_t blocksDeleted = 0;
try
{
@@ -1113,18 +1240,18 @@ void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
{
// In block, because dir may not be valid after the delete directory call
BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
-
+
// Store the directory it's in for later
InDirectory = dir.GetContainerID();
// Depth first delete of contents
- DeleteDirectoryRecurse(ObjectID, blocksDeleted, Undelete);
+ DeleteDirectoryRecurse(ObjectID, Undelete);
}
-
+
// Remove the entry from the directory it's in
ASSERT(InDirectory != 0);
BackupStoreDirectory &parentDir(GetDirectoryInternal(InDirectory));
-
+
BackupStoreDirectory::Iterator i(parentDir);
BackupStoreDirectory::Entry *en = 0;
while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
@@ -1141,18 +1268,16 @@ void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
{
en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
}
-
+
// Save it
- SaveDirectory(parentDir, InDirectory);
-
+ SaveDirectory(parentDir);
+
// Done
break;
}
}
-
+
// Update blocks deleted count
- mapStoreInfo->ChangeBlocksInDeletedFiles(Undelete?(0 - blocksDeleted):(blocksDeleted));
- mapStoreInfo->AdjustNumDirectories(-1);
SaveStoreInfo(false);
}
catch(...)
@@ -1162,6 +1287,7 @@ void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
}
}
+
// --------------------------------------------------------------------------
//
// Function
@@ -1170,18 +1296,18 @@ void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
// Created: 2003/10/21
//
// --------------------------------------------------------------------------
-void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBlocksDeletedOut, bool Undelete)
+void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, bool Undelete)
{
try
{
// Does things carefully to avoid using a directory in the cache after recursive call
// because it may have been deleted.
-
+
// Do sub directories
{
// Get the directory...
BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
-
+
// Then scan it for directories
std::vector<int64_t> subDirs;
BackupStoreDirectory::Iterator i(dir);
@@ -1204,30 +1330,46 @@ void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBloc
subDirs.push_back(en->GetObjectID());
}
}
-
+
// Done with the directory for now. Recurse to sub directories
for(std::vector<int64_t>::const_iterator i = subDirs.begin(); i != subDirs.end(); ++i)
{
- DeleteDirectoryRecurse((*i), rBlocksDeletedOut, Undelete);
+ DeleteDirectoryRecurse(*i, Undelete);
}
}
-
+
// Then, delete the files. Will need to load the directory again because it might have
// been removed from the cache.
{
// Get the directory...
BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
-
+
// Changes made?
bool changesMade = false;
-
- // Run through files
+
+ // Run through files
BackupStoreDirectory::Iterator i(dir);
BackupStoreDirectory::Entry *en = 0;
while((en = i.Next(Undelete?(BackupStoreDirectory::Entry::Flags_Deleted):(BackupStoreDirectory::Entry::Flags_INCLUDE_EVERYTHING),
Undelete?(0):(BackupStoreDirectory::Entry::Flags_Deleted))) != 0) // Ignore deleted directories (or not deleted if Undelete)
{
+ // Keep count of the deleted blocks
+ if(en->IsFile())
+ {
+ int64_t size = en->GetSizeInBlocks();
+ ASSERT(en->IsDeleted() == Undelete);
+ // Don't adjust counters for old files,
+ // because it can be both old and deleted.
+ if(!en->IsOld())
+ {
+ mapStoreInfo->ChangeBlocksInCurrentFiles(Undelete ? size : -size);
+ mapStoreInfo->AdjustNumCurrentFiles(Undelete ? 1 : -1);
+ }
+ mapStoreInfo->ChangeBlocksInDeletedFiles(Undelete ? -size : size);
+ mapStoreInfo->AdjustNumDeletedFiles(Undelete ? -1 : 1);
+ }
+
// Add/remove the deleted flags
if(Undelete)
{
@@ -1237,21 +1379,15 @@ void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBloc
{
en->AddFlags(BackupStoreDirectory::Entry::Flags_Deleted);
}
-
- // Keep count of the deleted blocks
- if((en->GetFlags() & BackupStoreDirectory::Entry::Flags_File) != 0)
- {
- rBlocksDeletedOut += en->GetSizeInBlocks();
- }
-
+
// Did something
changesMade = true;
}
-
+
// Save the directory
if(changesMade)
{
- SaveDirectory(dir, ObjectID);
+ SaveDirectory(dir);
}
}
}
@@ -1263,7 +1399,6 @@ void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, int64_t &rBloc
}
-
// --------------------------------------------------------------------------
//
// Function
@@ -1284,15 +1419,15 @@ void BackupStoreContext::ChangeDirAttributes(int64_t Directory, const Streamable
}
try
- {
+ {
// Get the directory we want to modify
BackupStoreDirectory &dir(GetDirectoryInternal(Directory));
-
+
// Set attributes
dir.SetAttributes(Attributes, AttributesModTime);
-
+
// Save back
- SaveDirectory(dir, Directory);
+ SaveDirectory(dir);
}
catch(...)
{
@@ -1301,6 +1436,7 @@ void BackupStoreContext::ChangeDirAttributes(int64_t Directory, const Streamable
}
}
+
// --------------------------------------------------------------------------
//
// Function
@@ -1324,7 +1460,7 @@ bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilena
{
// Get the directory we want to modify
BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
-
+
// Find the file entry
BackupStoreDirectory::Entry *en = 0;
// Iterate through current versions of files, only
@@ -1338,10 +1474,10 @@ bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilena
{
// Set attributes
en->SetAttributes(Attributes, AttributesHash);
-
+
// Tell caller the object ID
rObjectIDOut = en->GetObjectID();
-
+
// Done
break;
}
@@ -1351,16 +1487,16 @@ bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilena
// Didn't find it
return false;
}
-
+
// Save back
- SaveDirectory(dir, InDirectory);
+ SaveDirectory(dir);
}
catch(...)
{
RemoveDirectoryFromCache(InDirectory);
throw;
}
-
+
// Changed, everything OK
return true;
}
@@ -1380,7 +1516,7 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
// Note that we need to allow object IDs a little bit greater than the last one in the store info,
// because the store info may not have got saved in an error condition. Max greater ID is
// STORE_INFO_SAVE_DELAY in this case, *2 to be safe.
@@ -1389,7 +1525,7 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
// Obviously bad object ID
return false;
}
-
+
// Test to see if it exists on the disc
std::string filename;
MakeObjectFilename(ObjectID, filename);
@@ -1398,7 +1534,7 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
// RaidFile reports no file there
return false;
}
-
+
// Do we need to be more specific?
if(MustBe != ObjectExists_Anything)
{
@@ -1406,7 +1542,7 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
std::auto_ptr<RaidFileRead> objectFile(RaidFileRead::Open(mStoreDiscSet, filename));
// Read the first integer
- u_int32_t magic;
+ uint32_t magic;
if(!objectFile->ReadFullBuffer(&magic, sizeof(magic), 0 /* not interested in how many read if failure */))
{
// Failed to get any bytes, must have failed
@@ -1422,17 +1558,17 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
#endif
// Right one?
- u_int32_t requiredMagic = (MustBe == ObjectExists_File)?OBJECTMAGIC_FILE_MAGIC_VALUE_V1:OBJECTMAGIC_DIR_MAGIC_VALUE;
-
+ uint32_t requiredMagic = (MustBe == ObjectExists_File)?OBJECTMAGIC_FILE_MAGIC_VALUE_V1:OBJECTMAGIC_DIR_MAGIC_VALUE;
+
// Check
if(ntohl(magic) != requiredMagic)
{
return false;
}
-
+
// File is implicitly closed
}
-
+
return true;
}
@@ -1451,7 +1587,7 @@ std::auto_ptr<IOStream> BackupStoreContext::OpenObject(int64_t ObjectID)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
// Attempt to open the file
std::string fn;
MakeObjectFilename(ObjectID, fn);
@@ -1473,7 +1609,7 @@ int64_t BackupStoreContext::GetClientStoreMarker()
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
return mapStoreInfo->GetClientStoreMarker();
}
@@ -1536,7 +1672,7 @@ void BackupStoreContext::SetClientStoreMarker(int64_t ClientStoreMarker)
{
THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
}
-
+
mapStoreInfo->SetClientStoreMarker(ClientStoreMarker);
SaveStoreInfo(false /* don't delay saving this */);
}
@@ -1550,7 +1686,9 @@ void BackupStoreContext::SetClientStoreMarker(int64_t ClientStoreMarker)
// Created: 12/11/03
//
// --------------------------------------------------------------------------
-void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory, int64_t MoveToDirectory, const BackupStoreFilename &rNewFilename, bool MoveAllWithSameName, bool AllowMoveOverDeletedObject)
+void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
+ int64_t MoveToDirectory, const BackupStoreFilename &rNewFilename,
+ bool MoveAllWithSameName, bool AllowMoveOverDeletedObject)
{
if(mReadOnly)
{
@@ -1561,7 +1699,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
int64_t targetSearchExcludeFlags = (AllowMoveOverDeletedObject)
?(BackupStoreDirectory::Entry::Flags_Deleted)
:(BackupStoreDirectory::Entry::Flags_EXCLUDE_NOTHING);
-
+
// Special case if the directories are the same...
if(MoveFromDirectory == MoveToDirectory)
{
@@ -1569,16 +1707,16 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
{
// Get the first directory
BackupStoreDirectory &dir(GetDirectoryInternal(MoveFromDirectory));
-
+
// Find the file entry
BackupStoreDirectory::Entry *en = dir.FindEntryByID(ObjectID);
-
+
// Error if not found
if(en == 0)
{
THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
}
-
+
// Check the new name doesn't already exist (optionally ignoring deleted files)
{
BackupStoreDirectory::Iterator i(dir);
@@ -1591,7 +1729,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
}
}
}
-
+
// Need to get all the entries with the same name?
if(MoveAllWithSameName)
{
@@ -1612,45 +1750,46 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
// Just copy this one
en->SetName(rNewFilename);
}
-
+
// Save the directory back
- SaveDirectory(dir, MoveFromDirectory);
+ SaveDirectory(dir);
}
catch(...)
{
RemoveDirectoryFromCache(MoveToDirectory); // either will do, as they're the same
throw;
}
-
+
return;
}
- // Got to be careful how this is written, as we can't guarentte that if we have two
- // directories open, the first won't be deleted as the second is opened. (cache)
+ // Got to be careful how this is written, as we can't guarantee that
+ // if we have two directories open, the first won't be deleted as the
+ // second is opened. (cache)
// List of entries to move
std::vector<BackupStoreDirectory::Entry *> moving;
-
+
// list of directory IDs which need to have containing dir id changed
std::vector<int64_t> dirsToChangeContainingID;
try
{
// First of all, get copies of the entries to move to the to directory.
-
+
{
// Get the first directory
BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
-
+
// Find the file entry
BackupStoreDirectory::Entry *en = from.FindEntryByID(ObjectID);
-
+
// Error if not found
if(en == 0)
{
THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
}
-
+
// Need to get all the entries with the same name?
if(MoveAllWithSameName)
{
@@ -1663,7 +1802,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
{
// Copy
moving.push_back(new BackupStoreDirectory::Entry(*c));
-
+
// Check for containing directory correction
if(c->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(c->GetObjectID());
}
@@ -1679,13 +1818,13 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
if(en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(en->GetObjectID());
}
}
-
+
// Secondly, insert them into the to directory, and save it
-
+
{
// To directory
BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
-
+
// Check the new name doesn't already exist
{
BackupStoreDirectory::Iterator i(to);
@@ -1698,7 +1837,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
}
}
}
-
+
// Copy the entries into it, changing the name as we go
for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
{
@@ -1706,9 +1845,9 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
en->SetName(rNewFilename);
to.AddEntry(*en); // adds copy
}
-
+
// Save back
- SaveDirectory(to, MoveToDirectory);
+ SaveDirectory(to);
}
// Thirdly... remove them from the first directory -- but if it fails, attempt to delete them from the to directory
@@ -1716,57 +1855,57 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
{
// Get directory
BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
-
+
// Delete each one
for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
{
from.DeleteEntry((*i)->GetObjectID());
}
-
+
// Save back
- SaveDirectory(from, MoveFromDirectory);
+ SaveDirectory(from);
}
catch(...)
{
// UNDO modification to To directory
-
+
// Get directory
BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
-
+
// Delete each one
for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
{
to.DeleteEntry((*i)->GetObjectID());
}
-
+
// Save back
- SaveDirectory(to, MoveToDirectory);
+ SaveDirectory(to);
// Throw the error
throw;
}
-
+
// Finally... for all the directories we moved, modify their containing directory ID
for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
{
// Load the directory
BackupStoreDirectory &change(GetDirectoryInternal(*i));
-
+
// Modify containing dir ID
change.SetContainerID(MoveToDirectory);
-
+
// Save it back
- SaveDirectory(change, *i);
+ SaveDirectory(change);
}
}
catch(...)
{
- // Make sure directories aren't in the cache, as they may have been modified
+ // Make sure directories aren't in the cache, as they may have been modified
RemoveDirectoryFromCache(MoveToDirectory);
RemoveDirectoryFromCache(MoveFromDirectory);
for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
{
- RemoveDirectoryFromCache(*i);
+ RemoveDirectoryFromCache(*i);
}
while(!moving.empty())
@@ -1775,7 +1914,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
moving.pop_back();
}
throw;
- }
+ }
// Clean up
while(!moving.empty())
@@ -1786,7 +1925,6 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
}
-
// --------------------------------------------------------------------------
//
// Function
@@ -1801,8 +1939,7 @@ const BackupStoreInfo &BackupStoreContext::GetBackupStoreInfo() const
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
return *(mapStoreInfo.get());
}
-