summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorChris Wilson <chris+github@qwirx.com>2014-10-31 22:09:34 +0000
committerChris Wilson <chris+github@qwirx.com>2014-10-31 22:09:34 +0000
commit6d52b312e556356030edd6ec5904faa63f5b9778 (patch)
treea51f6cd249f0c9b1e09a744c61d3e6b4a2afc210 /lib
parentd95ae43d507af4f02749f1dd99500f3220253002 (diff)
Whitespace, comment and readability fixes
Diffstat (limited to 'lib')
-rw-r--r--lib/backupstore/BackupCommands.cpp126
-rw-r--r--lib/backupstore/BackupStoreCheck.cpp68
-rw-r--r--lib/backupstore/BackupStoreCheck.h2
-rw-r--r--lib/backupstore/BackupStoreCheck2.cpp89
-rw-r--r--lib/backupstore/BackupStoreContext.cpp175
-rw-r--r--lib/backupstore/BackupStoreContext.h10
-rw-r--r--lib/backupstore/BackupStoreDirectory.cpp120
-rw-r--r--lib/backupstore/BackupStoreDirectory.h15
-rw-r--r--lib/backupstore/BackupStoreFilenameClear.h2
-rw-r--r--lib/backupstore/HousekeepStoreAccount.cpp81
-rw-r--r--lib/backupstore/backupprotocol.txt1
11 files changed, 346 insertions, 343 deletions
diff --git a/lib/backupstore/BackupCommands.cpp b/lib/backupstore/BackupCommands.cpp
index 8b485d0c..c32987fe 100644
--- a/lib/backupstore/BackupCommands.cpp
+++ b/lib/backupstore/BackupCommands.cpp
@@ -93,7 +93,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolLogin::DoCommand(BackupProtoc
// and that the client actually has an account on this machine
if(mClientID != rContext.GetClientID())
{
- BOX_WARNING("Failed login from client ID " <<
+ BOX_WARNING("Failed login from client ID " <<
BOX_FORMAT_ACCOUNT(mClientID) << ": "
"wrong certificate for this account");
return PROTOCOL_ERROR(Err_BadLogin);
@@ -101,7 +101,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolLogin::DoCommand(BackupProtoc
if(!rContext.GetClientHasAccount())
{
- BOX_WARNING("Failed login from client ID " <<
+ BOX_WARNING("Failed login from client ID " <<
BOX_FORMAT_ACCOUNT(mClientID) << ": "
"no such account on this server");
return PROTOCOL_ERROR(Err_BadLogin);
@@ -117,17 +117,17 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolLogin::DoCommand(BackupProtoc
BOX_FORMAT_ACCOUNT(mClientID));
return PROTOCOL_ERROR(Err_CannotLockStoreForWriting);
}
-
+
// Debug: check we got the lock
ASSERT(!rContext.SessionIsReadOnly());
}
-
+
// Load the store info
rContext.LoadStoreInfo();
if(!rContext.GetBackupStoreInfo().IsAccountEnabled())
{
- BOX_WARNING("Refused login from disabled client ID " <<
+ BOX_WARNING("Refused login from disabled client ID " <<
BOX_FORMAT_ACCOUNT(mClientID));
return PROTOCOL_ERROR(Err_DisabledAccount);
}
@@ -137,9 +137,9 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolLogin::DoCommand(BackupProtoc
// Mark the next phase
rContext.SetPhase(BackupStoreContext::Phase_Commands);
-
+
// Log login
- BOX_NOTICE("Login from Client ID " <<
+ BOX_NOTICE("Login from Client ID " <<
BOX_FORMAT_ACCOUNT(mClientID) << " "
"(name=" << rContext.GetAccountName() << "): " <<
(((mFlags & Flags_ReadOnly) != Flags_ReadOnly)
@@ -166,7 +166,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolFinished::DoCommand(BackupPro
{
// can be called in any phase
- BOX_NOTICE("Session finished for Client ID " <<
+ BOX_NOTICE("Session finished for Client ID " <<
BOX_FORMAT_ACCOUNT(rContext.GetClientID()) << " "
"(name=" << rContext.GetAccountName() << ")");
@@ -197,11 +197,11 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolListDirectory::DoCommand(Back
// Ask the context for a directory
const BackupStoreDirectory &rdir(
rContext.GetDirectory(mObjectID));
- rdir.WriteToStream(*stream, mFlagsMustBeSet,
+ rdir.WriteToStream(*stream, mFlagsMustBeSet,
mFlagsNotToBeSet, mSendAttributes,
false /* never send dependency info to the client */);
}
- catch (RaidFileException &e)
+ catch(RaidFileException &e)
{
if (e.GetSubType() == RaidFileException::RaidFileDoesntExist)
{
@@ -211,7 +211,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolListDirectory::DoCommand(Back
}
stream->SetForReading();
-
+
// Get the protocol to send the stream
rProtocol.SendStreamAfterCommand(static_cast< std::auto_ptr<IOStream> > (stream));
@@ -240,7 +240,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolStoreFile::DoCommand(
{
return hookResult;
}
-
+
// Check that the diff from file actually exists, if it's specified
if(mDiffFromFileID != 0)
{
@@ -250,7 +250,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolStoreFile::DoCommand(
return PROTOCOL_ERROR(Err_DiffFromFileDoesNotExist);
}
}
-
+
// Ask the context to store it
int64_t id = 0;
try
@@ -275,7 +275,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolStoreFile::DoCommand(
throw;
}
}
-
+
// Tell the caller what the file ID was
return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(id));
}
@@ -315,7 +315,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetObject::DoCommand(BackupPr
//
// Function
// Name: BackupProtocolGetFile::DoCommand(Protocol &, BackupStoreContext &)
-// Purpose: Command to get an file object from the server -- may have to do a bit of
+// Purpose: Command to get an file object from the server -- may have to do a bit of
// work to get the object.
// Created: 2003/09/03
//
@@ -357,13 +357,13 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetFile::DoCommand(BackupProt
en = rdir.FindEntryByID(id);
if(en == 0)
{
- BOX_ERROR("Object " <<
+ BOX_ERROR("Object " <<
BOX_FORMAT_OBJECTID(mObjectID) <<
- " in dir " <<
+ " in dir " <<
BOX_FORMAT_OBJECTID(mInDirectory) <<
" for account " <<
BOX_FORMAT_ACCOUNT(rContext.GetClientID()) <<
- " references object " <<
+ " references object " <<
BOX_FORMAT_OBJECTID(id) <<
" which does not exist in dir");
return PROTOCOL_ERROR(Err_PatchConsistencyError);
@@ -371,73 +371,73 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetFile::DoCommand(BackupProt
id = en->GetDependsNewer();
}
while(en != 0 && id != 0);
-
+
// OK! The last entry in the chain is the full file, the others are patches back from it.
// Open the last one, which is the current from file
std::auto_ptr<IOStream> from(rContext.OpenObject(patchChain[patchChain.size() - 1]));
-
+
// Then, for each patch in the chain, do a combine
for(int p = ((int)patchChain.size()) - 2; p >= 0; --p)
{
// ID of patch
int64_t patchID = patchChain[p];
-
+
// Open it a couple of times
std::auto_ptr<IOStream> diff(rContext.OpenObject(patchID));
std::auto_ptr<IOStream> diff2(rContext.OpenObject(patchID));
-
+
// Choose a temporary filename for the result of the combination
std::ostringstream fs;
fs << rContext.GetAccountRoot() << ".recombinetemp." << p;
- std::string tempFn =
+ std::string tempFn =
RaidFileController::DiscSetPathToFileSystemPath(
rContext.GetStoreDiscSet(), fs.str(),
p + 16);
-
+
// Open the temporary file
std::auto_ptr<IOStream> combined(
new InvisibleTempFileStream(
tempFn, O_RDWR | O_CREAT | O_EXCL |
O_BINARY | O_TRUNC));
-
+
// Do the combining
BackupStoreFile::CombineFile(*diff, *diff2, *from, *combined);
-
+
// Move to the beginning of the combined file
combined->Seek(0, IOStream::SeekType_Absolute);
-
+
// Then shuffle round for the next go
if (from.get()) from->Close();
from = combined;
}
-
+
// Now, from contains a nice file to send to the client. Reorder it
{
// Write nastily to allow this to work with gcc 2.x
std::auto_ptr<IOStream> t(BackupStoreFile::ReorderFileToStreamOrder(from.get(), true /* take ownership */));
stream = t;
}
-
+
// Release from file to avoid double deletion
from.release();
}
else
{
// Simple case: file already exists on disc ready to go
-
+
// Open the object
std::auto_ptr<IOStream> object(rContext.OpenObject(mObjectID));
BufferedStream buf(*object);
-
+
// Verify it
if(!BackupStoreFile::VerifyEncodedFileFormat(buf))
{
return PROTOCOL_ERROR(Err_FileDoesNotVerify);
}
-
+
// Reset stream -- seek to beginning
object->Seek(0, IOStream::SeekType_Absolute);
-
+
// Reorder the stream/file into stream order
{
// Write nastily to allow this to work with gcc 2.x
@@ -445,15 +445,15 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetFile::DoCommand(BackupProt
stream = t;
}
- // Object will be deleted when the stream is deleted,
- // so can release the object auto_ptr here to avoid
+ // Object will be deleted when the stream is deleted,
+ // so can release the object auto_ptr here to avoid
// premature deletion
object.release();
}
// Stream the reordered stream to the peer
rProtocol.SendStreamAfterCommand(stream);
-
+
// Tell the caller what the file was
return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolSuccess(mObjectID));
}
@@ -493,12 +493,12 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolCreateDirectory2::DoCommand(
{
CHECK_PHASE(Phase_Commands)
CHECK_WRITEABLE_SESSION
-
- // Collect the attributes -- do this now so no matter what the outcome,
+
+ // Collect the attributes -- do this now so no matter what the outcome,
// the data has been absorbed.
StreamableMemBlock attr;
attr.Set(rDataStream, rProtocol.GetTimeout());
-
+
// Check to see if the hard limit has been exceeded
if(rContext.HardLimitExceeded())
{
@@ -553,7 +553,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolChangeDirAttributes::DoComman
CHECK_PHASE(Phase_Commands)
CHECK_WRITEABLE_SESSION
- // Collect the attributes -- do this now so no matter what the outcome,
+ // Collect the attributes -- do this now so no matter what the outcome,
// the data has been absorbed.
StreamableMemBlock attr;
attr.Set(rDataStream, rProtocol.GetTimeout());
@@ -582,7 +582,7 @@ BackupProtocolSetReplacementFileAttributes::DoCommand(
CHECK_PHASE(Phase_Commands)
CHECK_WRITEABLE_SESSION
- // Collect the attributes -- do this now so no matter what the outcome,
+ // Collect the attributes -- do this now so no matter what the outcome,
// the data has been absorbed.
StreamableMemBlock attr;
attr.Set(rDataStream, rProtocol.GetTimeout());
@@ -600,7 +600,6 @@ BackupProtocolSetReplacementFileAttributes::DoCommand(
}
-
// --------------------------------------------------------------------------
//
// Function
@@ -671,7 +670,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolDeleteDirectory::DoCommand(Ba
{
rContext.DeleteDirectory(mObjectID);
}
- catch (BackupStoreException &e)
+ catch(BackupStoreException &e)
{
if(e.GetSubType() == BackupStoreException::MultiplyReferencedObject)
{
@@ -745,7 +744,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolMoveObject::DoCommand(BackupP
{
CHECK_PHASE(Phase_Commands)
CHECK_WRITEABLE_SESSION
-
+
// Let context do this, but modify error reporting on exceptions...
try
{
@@ -785,21 +784,21 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolMoveObject::DoCommand(BackupP
std::auto_ptr<BackupProtocolMessage> BackupProtocolGetObjectName::DoCommand(BackupProtocolReplyable &rProtocol, BackupStoreContext &rContext) const
{
CHECK_PHASE(Phase_Commands)
-
+
// Create a stream for the list of filenames
std::auto_ptr<CollectInBufferStream> stream(new CollectInBufferStream);
// Object and directory IDs
int64_t objectID = mObjectID;
int64_t dirID = mContainingDirectoryID;
-
+
// Data to return in the reply
int32_t numNameElements = 0;
int16_t objectFlags = 0;
int64_t modTime = 0;
uint64_t attrModHash = 0;
bool haveModTimes = false;
-
+
do
{
// Check the directory really exists
@@ -822,13 +821,13 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetObjectName::DoCommand(Back
// Abort!
return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolObjectName(BackupProtocolObjectName::NumNameElements_ObjectDoesntExist, 0, 0, 0));
}
-
+
// Store flags?
if(objectFlags == 0)
{
objectFlags = en->GetFlags();
}
-
+
// Store modification times?
if(!haveModTimes)
{
@@ -836,14 +835,14 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetObjectName::DoCommand(Back
attrModHash = en->GetAttributesHash();
haveModTimes = true;
}
-
+
// Store the name in the stream
en->GetName().WriteToStream(*stream);
-
+
// Count of name elements
++numNameElements;
}
-
+
// Setup for next time round
objectID = dirID;
dirID = rdir.GetContainerID();
@@ -854,7 +853,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetObjectName::DoCommand(Back
if(numNameElements > 0)
{
// Get the stream ready to go
- stream->SetForReading();
+ stream->SetForReading();
// Tell the protocol to send the stream
rProtocol.SendStreamAfterCommand(static_cast< std::auto_ptr<IOStream> >(stream));
}
@@ -879,10 +878,10 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetBlockIndexByID::DoCommand(
// Open the file
std::auto_ptr<IOStream> stream(rContext.OpenObject(mObjectID));
-
+
// Move the file pointer to the block index
BackupStoreFile::MoveStreamPositionToBlockIndex(*stream);
-
+
// Return the stream to the client
rProtocol.SendStreamAfterCommand(stream);
@@ -905,7 +904,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetBlockIndexByName::DoComman
// Get the directory
const BackupStoreDirectory &dir(rContext.GetDirectory(mInDirectory));
-
+
// Find the latest object ID within it which has the same name
int64_t objectID = 0;
BackupStoreDirectory::Iterator i(dir);
@@ -921,7 +920,7 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetBlockIndexByName::DoComman
}
}
}
-
+
// Found anything?
if(objectID == 0)
{
@@ -931,10 +930,10 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetBlockIndexByName::DoComman
// Open the file
std::auto_ptr<IOStream> stream(rContext.OpenObject(objectID));
-
+
// Move the file pointer to the block index
BackupStoreFile::MoveStreamPositionToBlockIndex(*stream);
-
+
// Return the stream to the client
rProtocol.SendStreamAfterCommand(stream);
@@ -957,11 +956,11 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetAccountUsage::DoCommand(Ba
// Get store info from context
const BackupStoreInfo &rinfo(rContext.GetBackupStoreInfo());
-
+
// Find block size
RaidFileController &rcontroller(RaidFileController::GetController());
RaidFileDiscSet &rdiscSet(rcontroller.GetDiscSet(rinfo.GetDiscSetNumber()));
-
+
// Return info
return std::auto_ptr<BackupProtocolMessage>(new BackupProtocolAccountUsage(
rinfo.GetBlocksUsed(),
@@ -1007,11 +1006,11 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetAccountUsage2::DoCommand(
// Get store info from context
const BackupStoreInfo &info(rContext.GetBackupStoreInfo());
-
+
// Find block size
RaidFileController &rcontroller(RaidFileController::GetController());
RaidFileDiscSet &rdiscSet(rcontroller.GetDiscSet(info.GetDiscSetNumber()));
-
+
// Return info
BackupProtocolAccountUsage2* usage = new BackupProtocolAccountUsage2();
std::auto_ptr<BackupProtocolMessage> reply(usage);
@@ -1036,4 +1035,3 @@ std::auto_ptr<BackupProtocolMessage> BackupProtocolGetAccountUsage2::DoCommand(
return reply;
}
-
diff --git a/lib/backupstore/BackupStoreCheck.cpp b/lib/backupstore/BackupStoreCheck.cpp
index 05a901fa..1b314eec 100644
--- a/lib/backupstore/BackupStoreCheck.cpp
+++ b/lib/backupstore/BackupStoreCheck.cpp
@@ -112,14 +112,14 @@ void BackupStoreCheck::Check()
BOX_INFO("Phase 1, check objects...");
}
CheckObjects();
-
+
// Phase 2, check directories
if(!mQuiet)
{
BOX_INFO("Phase 2, check directories...");
}
CheckDirectories();
-
+
// Phase 3, check root
if(!mQuiet)
{
@@ -141,7 +141,7 @@ void BackupStoreCheck::Check()
}
FixDirsWithWrongContainerID();
FixDirsWithLostDirs();
-
+
// Phase 6, regenerate store info
if(!mQuiet)
{
@@ -257,12 +257,12 @@ void BackupStoreCheck::CheckObjects()
{
start.resize(start.size() - 1);
}
-
+
maxDir = CheckObjectsScanDir(0, 1, mStoreRoot);
BOX_TRACE("Max dir starting ID is " <<
BOX_FORMAT_OBJECTID(maxDir));
}
-
+
// Then go through and scan all the objects within those directories
for(int64_t d = 0; d <= maxDir; d += (1<<STORE_ID_SEGMENT_LENGTH))
{
@@ -290,11 +290,11 @@ int64_t BackupStoreCheck::CheckObjectsScanDir(int64_t StartID, int Level, const
// If any of the directories is missing, create it.
RaidFileController &rcontroller(RaidFileController::GetController());
RaidFileDiscSet rdiscSet(rcontroller.GetDiscSet(mDiscSetNumber));
-
+
if(!rdiscSet.IsNonRaidSet())
{
unsigned int numDiscs = rdiscSet.size();
-
+
for(unsigned int l = 0; l < numDiscs; ++l)
{
// build name
@@ -307,7 +307,7 @@ int64_t BackupStoreCheck::CheckObjectsScanDir(int64_t StartID, int Level, const
{
THROW_SYS_FILE_ERROR("Failed to "
"create missing RaidFile "
- "directory", dn,
+ "directory", dn,
RaidFileException, OSError);
}
}
@@ -337,7 +337,7 @@ int64_t BackupStoreCheck::CheckObjectsScanDir(int64_t StartID, int Level, const
else
{
BOX_ERROR("Spurious or invalid directory " <<
- rDirName << DIRECTORY_SEPARATOR <<
+ rDirName << DIRECTORY_SEPARATOR <<
(*i) << " found, " <<
(mFixErrors?"deleting":"delete manually"));
++mNumberErrorsFound;
@@ -364,11 +364,11 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
std::string dirName;
StoreStructure::MakeObjectFilename(StartID, mStoreRoot, mDiscSetNumber, dirName, false /* don't make sure the dir exists */);
// Check expectations
- ASSERT(dirName.size() > 4 &&
+ ASSERT(dirName.size() > 4 &&
dirName[dirName.size() - 4] == DIRECTORY_SEPARATOR_ASCHAR);
// Remove the filename from it
dirName.resize(dirName.size() - 4); // four chars for "/o00"
-
+
// Check directory exists
if(!RaidFileRead::DirectoryExists(mDiscSetNumber, dirName))
{
@@ -380,14 +380,14 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
std::vector<std::string> files;
RaidFileRead::ReadDirectoryContents(mDiscSetNumber, dirName,
RaidFileRead::DirReadType_FilesOnly, files);
-
+
// Array of things present
bool idsPresent[(1<<STORE_ID_SEGMENT_LENGTH)];
for(int l = 0; l < (1<<STORE_ID_SEGMENT_LENGTH); ++l)
{
idsPresent[l] = false;
}
-
+
// Parse each entry, building up a list of object IDs which are present in the dir.
// This is done so that whatever order is retured from the directory, objects are scanned
// in order.
@@ -416,11 +416,11 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
{
fileOK = false;
}
-
+
if(!fileOK)
{
// Unexpected or bad file, delete it
- BOX_ERROR("Spurious file " << dirName <<
+ BOX_ERROR("Spurious file " << dirName <<
DIRECTORY_SEPARATOR << (*i) << " found" <<
(mFixErrors?", deleting":""));
++mNumberErrorsFound;
@@ -431,7 +431,7 @@ void BackupStoreCheck::CheckObjectsDir(int64_t StartID)
}
}
}
-
+
// Check all the objects found in this directory
for(int i = 0; i < (1<<STORE_ID_SEGMENT_LENGTH); ++i)
{
@@ -483,7 +483,7 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID,
std::auto_ptr<RaidFileRead> file(
RaidFileRead::Open(mDiscSetNumber, rFilename));
size = file->GetDiscUsageInBlocks();
-
+
// Read in first four bytes -- don't have to worry about
// retrying if not all bytes read as is RaidFile
uint32_t signature;
@@ -494,7 +494,7 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID,
}
// Seek back to beginning
file->Seek(0, IOStream::SeekType_Absolute);
-
+
// Then... check depending on the type
switch(ntohl(signature))
{
@@ -522,7 +522,7 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID,
// Error caught, not a good file then, let it be deleted
return false;
}
-
+
// Got a container ID? (ie check was successful)
if(containerID == -1)
{
@@ -541,13 +541,13 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID,
// If it looks like a good object, and it's non-RAID, and
// this is a RAID set, then convert it to RAID.
-
+
RaidFileController &rcontroller(RaidFileController::GetController());
RaidFileDiscSet rdiscSet(rcontroller.GetDiscSet(mDiscSetNumber));
if(!rdiscSet.IsNonRaidSet())
{
// See if the file exists
- RaidFileUtil::ExistType existance =
+ RaidFileUtil::ExistType existance =
RaidFileUtil::RaidFileExists(rdiscSet, rFilename);
if(existance == RaidFileUtil::NonRaid)
{
@@ -568,7 +568,7 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID,
if(mFixErrors)
{
std::auto_ptr<RaidFileRead> read(
- RaidFileRead::Open(mDiscSetNumber,
+ RaidFileRead::Open(mDiscSetNumber,
rFilename));
RaidFileWrite write(mDiscSetNumber, rFilename);
write.Open(true /* overwrite */);
@@ -578,7 +578,7 @@ bool BackupStoreCheck::CheckAndAddObject(int64_t ObjectID,
}
}
}
-
+
// Report success
return true;
}
@@ -639,7 +639,7 @@ int64_t BackupStoreCheck::CheckDirInitial(int64_t ObjectID, IOStream &rStream)
// Wrong object ID
return -1;
}
-
+
// Return container ID
return dir.GetContainerID();
}
@@ -672,7 +672,7 @@ void BackupStoreCheck::CheckDirectories()
{
IDBlock *pblock = i->second;
int32_t bentries = (pblock == mpInfoLastBlock)?mInfoLastBlockEntries:BACKUPSTORECHECK_BLOCK_SIZE;
-
+
for(int e = 0; e < bentries; ++e)
{
uint8_t flags = GetFlags(pblock, e);
@@ -706,9 +706,9 @@ void BackupStoreCheck::CheckDirectories()
BOX_FORMAT_OBJECTID(pblock->mID[e]) <<
" was OK after fixing");
}
-
+
if(isModified && mFixErrors)
- {
+ {
BOX_WARNING("Writing modified directory to disk: " <<
BOX_FORMAT_OBJECTID(pblock->mID[e]));
RaidFileWrite fixed(mDiscSetNumber, filename);
@@ -824,14 +824,14 @@ bool BackupStoreCheck::CheckDirectory(BackupStoreDirectory& dir)
{
// Just remove the entry
badEntry = true;
- BOX_ERROR("Directory ID " <<
+ BOX_ERROR("Directory ID " <<
BOX_FORMAT_OBJECTID(dir.GetObjectID()) <<
- " references object " <<
+ " references object " <<
BOX_FORMAT_OBJECTID(en->GetObjectID()) <<
" which does not exist.");
++mNumberErrorsFound;
}
-
+
// Is this entry worth keeping?
if(badEntry)
{
@@ -846,16 +846,16 @@ bool BackupStoreCheck::CheckDirectory(BackupStoreDirectory& dir)
{
BOX_ERROR("Removing directory entry " <<
BOX_FORMAT_OBJECTID(*d) << " from "
- "directory " <<
+ "directory " <<
BOX_FORMAT_OBJECTID(dir.GetObjectID()));
++mNumberErrorsFound;
dir.DeleteEntry(*d);
}
-
+
// Mark as modified
restart = true;
isModified = true;
-
+
// Errors found
}
}
@@ -871,7 +871,7 @@ bool BackupStoreCheck::CheckDirectoryEntry(BackupStoreDirectory::Entry& rEntry,
ASSERT(piBlock != 0);
uint8_t iflags = GetFlags(piBlock, IndexInDirBlock);
-
+
// Is the type the same?
if(((iflags & Flags_IsDir) == Flags_IsDir) != rEntry.IsDir())
{
diff --git a/lib/backupstore/BackupStoreCheck.h b/lib/backupstore/BackupStoreCheck.h
index feecf05c..22473169 100644
--- a/lib/backupstore/BackupStoreCheck.h
+++ b/lib/backupstore/BackupStoreCheck.h
@@ -131,7 +131,7 @@ private:
bool CheckDirectoryEntry(BackupStoreDirectory::Entry& rEntry,
int64_t DirectoryID, bool& rIsModified);
int64_t CheckFile(int64_t ObjectID, IOStream &rStream);
- int64_t CheckDirInitial(int64_t ObjectID, IOStream &rStream);
+ int64_t CheckDirInitial(int64_t ObjectID, IOStream &rStream);
// Fixing functions
bool TryToRecreateDirectory(int64_t MissingDirectoryID);
diff --git a/lib/backupstore/BackupStoreCheck2.cpp b/lib/backupstore/BackupStoreCheck2.cpp
index a4805378..1743d420 100644
--- a/lib/backupstore/BackupStoreCheck2.cpp
+++ b/lib/backupstore/BackupStoreCheck2.cpp
@@ -40,7 +40,7 @@ void BackupStoreCheck::CheckRoot()
{
int32_t index = 0;
IDBlock *pblock = LookupID(BACKUPSTORE_ROOT_DIRECTORY_ID, index);
-
+
if(pblock != 0)
{
// Found it. Which is lucky. Mark it as contained.
@@ -49,9 +49,9 @@ void BackupStoreCheck::CheckRoot()
else
{
BOX_WARNING("Root directory doesn't exist");
-
+
++mNumberErrorsFound;
-
+
if(mFixErrors)
{
// Create a new root directory
@@ -78,7 +78,7 @@ void BackupStoreCheck::CreateBlankDirectory(int64_t DirectoryID, int64_t Contain
}
BackupStoreDirectory dir(DirectoryID, ContainingDirID);
-
+
// Serialise to disc
std::string filename;
StoreStructure::MakeObjectFilename(DirectoryID, mStoreRoot, mDiscSetNumber, filename, true /* make sure the dir exists */);
@@ -87,10 +87,10 @@ void BackupStoreCheck::CreateBlankDirectory(int64_t DirectoryID, int64_t Contain
dir.WriteToStream(obj);
int64_t size = obj.GetDiscUsageInBlocks();
obj.Commit(true /* convert to raid now */);
-
+
// Record the fact we've done this
mDirsAdded.insert(DirectoryID);
-
+
// Add to sizes
mBlocksUsed += size;
mBlocksInDirectories += size;
@@ -131,15 +131,16 @@ void BackupStoreCheck::CheckUnattachedObjects()
{
IDBlock *pblock = i->second;
int32_t bentries = (pblock == mpInfoLastBlock)?mInfoLastBlockEntries:BACKUPSTORECHECK_BLOCK_SIZE;
-
+
for(int e = 0; e < bentries; ++e)
{
uint8_t flags = GetFlags(pblock, e);
if((flags & Flags_IsContained) == 0)
{
// Unattached object...
+ int64_t ObjectID = pblock->mID[e];
BOX_ERROR("Object " <<
- BOX_FORMAT_OBJECTID(pblock->mID[e]) <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
" is unattached.");
++mNumberErrorsFound;
@@ -159,7 +160,9 @@ void BackupStoreCheck::CheckUnattachedObjects()
{
int64_t diffFromObjectID = 0;
std::string filename;
- StoreStructure::MakeObjectFilename(pblock->mID[e], mStoreRoot, mDiscSetNumber, filename, false /* don't attempt to make sure the dir exists */);
+ StoreStructure::MakeObjectFilename(ObjectID,
+ mStoreRoot, mDiscSetNumber, filename,
+ false /* don't attempt to make sure the dir exists */);
// The easiest way to do this is to verify it again. Not such a bad penalty, because
// this really shouldn't be done very often.
@@ -172,8 +175,8 @@ void BackupStoreCheck::CheckUnattachedObjects()
// Just delete it to be safe.
if(diffFromObjectID != 0)
{
- BOX_WARNING("Object " << BOX_FORMAT_OBJECTID(pblock->mID[e]) << " is unattached, and is a patch. Deleting, cannot reliably recover.");
-
+ BOX_WARNING("Object " << BOX_FORMAT_OBJECTID(ObjectID) << " is unattached, and is a patch. Deleting, cannot reliably recover.");
+
// Delete this object instead
if(mFixErrors)
{
@@ -182,12 +185,12 @@ void BackupStoreCheck::CheckUnattachedObjects()
}
mBlocksUsed -= pblock->mObjectSizeInBlocks[e];
-
+
// Move on to next item
continue;
}
}
-
+
// Files contain their original filename, so perhaps the orginal directory still exists,
// or we can infer the existance of a directory?
// Look for a matching entry in the mDirsWhichContainLostDirs map.
@@ -253,7 +256,7 @@ void BackupStoreCheck::CheckUnattachedObjects()
}
// Add it to the directory
- pFixer->InsertObject(pblock->mID[e],
+ pFixer->InsertObject(ObjectID,
((flags & Flags_IsDir) == Flags_IsDir),
lostDirNameSerial);
}
@@ -288,7 +291,7 @@ bool BackupStoreCheck::TryToRecreateDirectory(int64_t MissingDirectoryID)
// Not a missing directory, can't recreate.
return false;
}
-
+
// Can recreate this! Wooo!
if(!mFixErrors)
{
@@ -301,12 +304,12 @@ bool BackupStoreCheck::TryToRecreateDirectory(int64_t MissingDirectoryID)
BOX_WARNING("Recreating missing directory " <<
BOX_FORMAT_OBJECTID(MissingDirectoryID));
-
+
// Create a blank directory
BackupStoreDirectory dir(MissingDirectoryID, missing->second /* containing dir ID */);
// Note that this directory already contains a directory entry pointing to
// this dir, so it doesn't have to be added.
-
+
// Serialise to disc
std::string filename;
StoreStructure::MakeObjectFilename(MissingDirectoryID, mStoreRoot, mDiscSetNumber, filename, true /* make sure the dir exists */);
@@ -314,10 +317,10 @@ bool BackupStoreCheck::TryToRecreateDirectory(int64_t MissingDirectoryID)
root.Open(false /* don't allow overwriting */);
dir.WriteToStream(root);
root.Commit(true /* convert to raid now */);
-
+
// Record the fact we've done this
mDirsAdded.insert(MissingDirectoryID);
-
+
// Remove the entry from the map, so this doesn't happen again
mDirsWhichContainLostDirs.erase(missing);
@@ -332,7 +335,7 @@ BackupStoreDirectoryFixer::BackupStoreDirectoryFixer(std::string storeRoot,
// Generate filename
StoreStructure::MakeObjectFilename(ID, mStoreRoot, mDiscSetNumber,
mFilename, false /* don't make sure the dir exists */);
-
+
// Read it in
std::auto_ptr<RaidFileRead> file(
RaidFileRead::Open(mDiscSetNumber, mFilename));
@@ -374,7 +377,7 @@ void BackupStoreDirectoryFixer::InsertObject(int64_t ObjectID, bool IsDirectory,
(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1
#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
&& ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V0
-#endif
+#endif
))
{
// This should never happen, everything has been
@@ -397,7 +400,7 @@ BackupStoreDirectoryFixer::~BackupStoreDirectoryFixer()
{
// Fix any flags which have been broken, which there's a good chance of doing
mDirectory.CheckAndFix();
-
+
// Write it out
RaidFileWrite root(mDiscSetNumber, mFilename);
root.Open(true /* allow overwriting */);
@@ -457,7 +460,7 @@ int64_t BackupStoreCheck::GetLostAndFoundDirID()
// Create a blank directory
CreateBlankDirectory(id, BACKUPSTORE_ROOT_DIRECTORY_ID);
-
+
// Add an entry for it
dir.AddEntry(lostAndFound, 0, id, 0, BackupStoreDirectory::Entry::Flags_Dir, 0);
@@ -466,7 +469,7 @@ int64_t BackupStoreCheck::GetLostAndFoundDirID()
root.Open(true /* allow overwriting */);
dir.WriteToStream(root);
root.Commit(true /* convert to raid now */);
-
+
// Store
mLostAndFoundDirectoryID = id;
@@ -498,7 +501,7 @@ void BackupStoreCheck::FixDirsWithWrongContainerID()
int32_t index = 0;
IDBlock *pblock = LookupID(*i, index);
if(pblock == 0) continue;
-
+
// Load in
BackupStoreDirectory dir;
std::string filename;
@@ -510,7 +513,7 @@ void BackupStoreCheck::FixDirsWithWrongContainerID()
// Adjust container ID
dir.SetContainerID(pblock->mContainer[index]);
-
+
// Write it out
RaidFileWrite root(mDiscSetNumber, filename);
root.Open(true /* allow overwriting */);
@@ -543,7 +546,7 @@ void BackupStoreCheck::FixDirsWithLostDirs()
int32_t index = 0;
IDBlock *pblock = LookupID(i->second, index);
if(pblock == 0) continue;
-
+
// Load in
BackupStoreDirectory dir;
std::string filename;
@@ -555,10 +558,10 @@ void BackupStoreCheck::FixDirsWithLostDirs()
// Delete the dodgy entry
dir.DeleteEntry(i->first);
-
+
// Fix it up
dir.CheckAndFix();
-
+
// Write it out
RaidFileWrite root(mDiscSetNumber, filename);
root.Open(true /* allow overwriting */);
@@ -626,7 +629,7 @@ void BackupStoreCheck::WriteNewStoreInfo()
"housekeeping doesn't delete files on next run.");
}
}
-
+
// Object ID
int64_t lastObjID = mLastIDInInfo;
if(mLostAndFoundDirectoryID != 0)
@@ -705,7 +708,7 @@ void BackupStoreCheck::WriteNewStoreInfo()
bool BackupStoreDirectory::CheckAndFix()
{
bool changed = false;
-
+
// Check that if a file depends on a new version, that version is in this directory
bool restart;
@@ -728,11 +731,11 @@ bool BackupStoreDirectory::CheckAndFix()
"on newer version " <<
FMT_OID(dependsNewer) <<
" which doesn't exist");
-
+
// Remove
delete *i;
mEntries.erase(i);
-
+
// Mark as changed
changed = true;
@@ -761,7 +764,7 @@ bool BackupStoreDirectory::CheckAndFix()
}
}
while(restart);
-
+
// Check that if a file has a dependency marked, it exists, and remove it if it doesn't
{
std::vector<Entry*>::iterator i(mEntries.begin());
@@ -778,7 +781,7 @@ bool BackupStoreDirectory::CheckAndFix()
"info cleared");
(*i)->SetDependsOlder(0);
-
+
// Mark as changed
changed = true;
}
@@ -790,7 +793,7 @@ bool BackupStoreDirectory::CheckAndFix()
{
// Reset change marker
ch = false;
-
+
// Search backwards -- so see newer versions first
std::vector<Entry*>::iterator i(mEntries.end());
if(i == mEntries.begin())
@@ -816,10 +819,8 @@ bool BackupStoreDirectory::CheckAndFix()
}
else
{
- bool isDir = (((*i)->GetFlags() & Entry::Flags_Dir) == Entry::Flags_Dir);
-
// Check mutually exclusive flags
- if(isDir && (((*i)->GetFlags() & Entry::Flags_File) == Entry::Flags_File))
+ if((*i)->IsDir() && (*i)->IsFile())
{
// Bad! Unset the file flag
BOX_TRACE("Entry " << FMT_i <<
@@ -873,29 +874,29 @@ bool BackupStoreDirectory::CheckAndFix()
}
}
}
-
+
if(removeEntry)
{
// Mark something as changed, in loop
ch = true;
-
+
// Mark something as globally changed
changed = true;
-
+
// erase the thing from the list
Entry *pentry = (*i);
mEntries.erase(i);
// And delete the entry object
delete pentry;
-
+
// Stop going around this loop, as the iterator is now invalid
break;
}
} while(i != mEntries.begin());
} while(ch != false);
-
+
return changed;
}
diff --git a/lib/backupstore/BackupStoreContext.cpp b/lib/backupstore/BackupStoreContext.cpp
index 683f32d2..393dc0d2 100644
--- a/lib/backupstore/BackupStoreContext.cpp
+++ b/lib/backupstore/BackupStoreContext.cpp
@@ -159,7 +159,7 @@ bool BackupStoreContext::AttemptToGetWriteLock()
// Request the lock
bool gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
-
+
if(!gotLock && mpHousekeeping)
{
// The housekeeping process might have the thing open -- ask it to stop
@@ -167,7 +167,7 @@ bool BackupStoreContext::AttemptToGetWriteLock()
int msgLen = sprintf(msg, "r%x\n", mClientID);
// Send message
mpHousekeeping->SendMessageToHousekeepingProcess(msg, msgLen);
-
+
// Then try again a few times
int tries = MAX_WAIT_FOR_HOUSEKEEPING_TO_RELEASE_ACCOUNT;
do
@@ -175,16 +175,16 @@ bool BackupStoreContext::AttemptToGetWriteLock()
::sleep(1 /* second */);
--tries;
gotLock = mWriteLock.TryAndGetLock(writeLockFile.c_str(), 0600 /* restrictive file permissions */);
-
+
} while(!gotLock && tries > 0);
}
-
+
if(gotLock)
{
// Got the lock, mark as not read only
mReadOnly = false;
}
-
+
return gotLock;
}
@@ -203,16 +203,16 @@ void BackupStoreContext::LoadStoreInfo()
{
THROW_EXCEPTION(BackupStoreException, StoreInfoAlreadyLoaded)
}
-
+
// Load it up!
std::auto_ptr<BackupStoreInfo> i(BackupStoreInfo::Load(mClientID, mAccountRootDir, mStoreDiscSet, mReadOnly));
-
+
// Check it
if(i->GetAccountID() != mClientID)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoForWrongAccount)
}
-
+
// Keep the pointer to it
mapStoreInfo = i;
@@ -264,7 +264,7 @@ void BackupStoreContext::SaveStoreInfo(bool AllowDelay)
}
}
- // Want to save now
+ // Want to save now
mapStoreInfo->Save();
// Set count for next delay
@@ -332,9 +332,9 @@ BackupStoreDirectory &BackupStoreContext::GetDirectoryInternal(int64_t ObjectID)
delete item->second;
mDirectoryCache.erase(item);
}
-
+
// Need to load it up
-
+
// First check to see if the cache is too big
if(mDirectoryCache.size() > MAX_CACHE_SIZE)
{
@@ -406,12 +406,12 @@ int64_t BackupStoreContext::AllocateObjectID()
// to try for finding an unused ID.
// (Sizes used in the store info are fixed by the housekeeping process)
int retryLimit = (STORE_INFO_SAVE_DELAY * 2);
-
+
while(retryLimit > 0)
{
// Attempt to allocate an ID from the store
int64_t id = mapStoreInfo->AllocateObjectID();
-
+
// Generate filename
std::string filename;
MakeObjectFilename(id, filename);
@@ -421,17 +421,17 @@ int64_t BackupStoreContext::AllocateObjectID()
// Success!
return id;
}
-
+
// Decrement retry count, and try again
--retryLimit;
-
+
// Mark that the store info should be saved as soon as possible
mSaveStoreInfoDelay = 0;
-
+
BOX_WARNING("When allocating object ID, found that " <<
BOX_FORMAT_OBJECTID(id) << " is already in use");
}
-
+
THROW_EXCEPTION(BackupStoreException, CouldNotFindUnusedIDDuringAllocation)
}
@@ -470,13 +470,13 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// be corrected the next time the account has a housekeeping run,
// and the object ID allocation code is tolerant of missed IDs.
// (the info is written lazily, so these are necessary)
-
+
// Get the directory we want to modify
BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
-
+
// Allocate the next ID
int64_t id = AllocateObjectID();
-
+
// Stream the file to disc
std::string fn;
MakeObjectFilename(id, fn, true /* make sure the directory it's in exists */);
@@ -509,12 +509,12 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
{
THROW_EXCEPTION(BackupStoreException, DiffFromIDNotFoundInDirectory)
}
-
+
// Diff file, needs to be recreated.
// Choose a temporary filename.
std::string tempFn(RaidFileController::DiscSetPathToFileSystemPath(mStoreDiscSet, fn + ".difftemp",
1 /* NOT the same disc as the write file, to avoid using lots of space on the same disc unnecessarily */));
-
+
try
{
// Open it twice
@@ -533,13 +533,13 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
THROW_EXCEPTION(CommonException, OSFileError);
}
#endif
-
+
// Stream the incoming diff to this temporary file
if(!rFile.CopyStreamTo(diff, BACKUP_STORE_TIMEOUT))
{
THROW_EXCEPTION(BackupStoreException, ReadFileFromStreamTimedOut)
}
-
+
// Verify the diff
diff.Seek(0, IOStream::SeekType_Absolute);
if(!BackupStoreFile::VerifyEncodedFileFormat(diff))
@@ -553,7 +553,7 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// Filename of the old version
std::string oldVersionFilename;
MakeObjectFilename(DiffFromFileID, oldVersionFilename, false /* no need to make sure the directory it's in exists */);
-
+
// Reassemble that diff -- open previous file, and combine the patch and file
std::auto_ptr<RaidFileRead> from(RaidFileRead::Open(mStoreDiscSet, oldVersionFilename));
BackupStoreFile::CombineFile(diff, diff2, *from, storeFile);
@@ -566,10 +566,10 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
diff.Seek(0, IOStream::SeekType_Absolute);
BackupStoreFile::ReverseDiffFile(diff, *from, *from2, *ppreviousVerStoreFile,
DiffFromFileID, &reversedDiffIsCompletelyDifferent);
-
+
// Store disc space used
oldVersionNewBlocksUsed = ppreviousVerStoreFile->GetDiscUsageInBlocks();
-
+
// And make a space adjustment for the size calculation
spaceSavedByConversionToPatch =
from->GetDiscUsageInBlocks() -
@@ -595,13 +595,13 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
throw;
}
}
-
+
// Get the blocks used
newObjectBlocksUsed = storeFile.GetDiscUsageInBlocks();
adjustment.mBlocksUsed += newObjectBlocksUsed;
adjustment.mBlocksInCurrentFiles += newObjectBlocksUsed;
adjustment.mNumCurrentFiles++;
-
+
// Exceeds the hard limit?
int64_t newTotalBlocksUsed = mapStoreInfo->GetBlocksUsed() +
adjustment.mBlocksUsed;
@@ -637,7 +637,7 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
// Error! Delete the file
RaidFileWrite del(mStoreDiscSet, fn);
del.Delete();
-
+
// Exception
THROW_EXCEPTION(BackupStoreException, AddedFileDoesNotVerify)
}
@@ -665,7 +665,6 @@ int64_t BackupStoreContext::AddFile(IOStream &rFile, int64_t InDirectory,
if(MarkFileWithSameNameAsOldVersions)
{
BackupStoreDirectory::Iterator i(dir);
-
BackupStoreDirectory::Entry *e = 0;
while((e = i.Next()) != 0)
{
@@ -824,7 +823,7 @@ bool BackupStoreContext::DeleteFile(const BackupStoreFilename &rFilename, int64_
mapStoreInfo->AdjustNumCurrentFiles(-1);
mapStoreInfo->ChangeBlocksInCurrentFiles(-blocks);
}
-
+
// Is this the last version?
if((e->GetFlags() & BackupStoreDirectory::Entry::Flags_OldVersion) == 0)
{
@@ -834,7 +833,7 @@ bool BackupStoreContext::DeleteFile(const BackupStoreFilename &rFilename, int64_
}
}
}
-
+
// Save changes?
if(madeChanges)
{
@@ -1124,7 +1123,7 @@ int64_t BackupStoreContext::AddDirectory(int64_t InDirectory,
dirSize = dirFile.GetDiscUsageInBlocks();
// Exceeds the hard limit?
- int64_t newTotalBlocksUsed = mapStoreInfo->GetBlocksUsed() +
+ int64_t newTotalBlocksUsed = mapStoreInfo->GetBlocksUsed() +
dirSize;
if(newTotalBlocksUsed > mapStoreInfo->GetBlocksHardLimit())
{
@@ -1204,7 +1203,7 @@ void BackupStoreContext::DeleteDirectory(int64_t ObjectID, bool Undelete)
{
// In block, because dir may not be valid after the delete directory call
BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
-
+
// Store the directory it's in for later
InDirectory = dir.GetContainerID();
@@ -1297,20 +1296,20 @@ void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, bool Undelete)
// Done with the directory for now. Recurse to sub directories
for(std::vector<int64_t>::const_iterator i = subDirs.begin(); i != subDirs.end(); ++i)
{
- DeleteDirectoryRecurse(*i, Undelete);
+ DeleteDirectoryRecurse(*i, Undelete);
}
}
-
+
// Then, delete the files. Will need to load the directory again because it might have
// been removed from the cache.
{
// Get the directory...
BackupStoreDirectory &dir(GetDirectoryInternal(ObjectID));
-
+
// Changes made?
bool changesMade = false;
-
- // Run through files
+
+ // Run through files
BackupStoreDirectory::Iterator i(dir);
BackupStoreDirectory::Entry *en = 0;
@@ -1346,7 +1345,7 @@ void BackupStoreContext::DeleteDirectoryRecurse(int64_t ObjectID, bool Undelete)
// Did something
changesMade = true;
}
-
+
// Save the directory
if(changesMade)
{
@@ -1383,13 +1382,13 @@ void BackupStoreContext::ChangeDirAttributes(int64_t Directory, const Streamable
}
try
- {
+ {
// Get the directory we want to modify
BackupStoreDirectory &dir(GetDirectoryInternal(Directory));
-
+
// Set attributes
dir.SetAttributes(Attributes, AttributesModTime);
-
+
// Save back
SaveDirectory(dir);
}
@@ -1423,7 +1422,7 @@ bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilena
{
// Get the directory we want to modify
BackupStoreDirectory &dir(GetDirectoryInternal(InDirectory));
-
+
// Find the file entry
BackupStoreDirectory::Entry *en = 0;
// Iterate through current versions of files, only
@@ -1437,10 +1436,10 @@ bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilena
{
// Set attributes
en->SetAttributes(Attributes, AttributesHash);
-
+
// Tell caller the object ID
rObjectIDOut = en->GetObjectID();
-
+
// Done
break;
}
@@ -1450,7 +1449,7 @@ bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilena
// Didn't find it
return false;
}
-
+
// Save back
SaveDirectory(dir);
}
@@ -1459,7 +1458,7 @@ bool BackupStoreContext::ChangeFileAttributes(const BackupStoreFilename &rFilena
RemoveDirectoryFromCache(InDirectory);
throw;
}
-
+
// Changed, everything OK
return true;
}
@@ -1479,7 +1478,7 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
// Note that we need to allow object IDs a little bit greater than the last one in the store info,
// because the store info may not have got saved in an error condition. Max greater ID is
// STORE_INFO_SAVE_DELAY in this case, *2 to be safe.
@@ -1488,7 +1487,7 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
// Obviously bad object ID
return false;
}
-
+
// Test to see if it exists on the disc
std::string filename;
MakeObjectFilename(ObjectID, filename);
@@ -1497,7 +1496,7 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
// RaidFile reports no file there
return false;
}
-
+
// Do we need to be more specific?
if(MustBe != ObjectExists_Anything)
{
@@ -1522,16 +1521,16 @@ bool BackupStoreContext::ObjectExists(int64_t ObjectID, int MustBe)
// Right one?
u_int32_t requiredMagic = (MustBe == ObjectExists_File)?OBJECTMAGIC_FILE_MAGIC_VALUE_V1:OBJECTMAGIC_DIR_MAGIC_VALUE;
-
+
// Check
if(ntohl(magic) != requiredMagic)
{
return false;
}
-
+
// File is implicitly closed
}
-
+
return true;
}
@@ -1550,7 +1549,7 @@ std::auto_ptr<IOStream> BackupStoreContext::OpenObject(int64_t ObjectID)
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
// Attempt to open the file
std::string fn;
MakeObjectFilename(ObjectID, fn);
@@ -1572,7 +1571,7 @@ int64_t BackupStoreContext::GetClientStoreMarker()
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
return mapStoreInfo->GetClientStoreMarker();
}
@@ -1635,7 +1634,7 @@ void BackupStoreContext::SetClientStoreMarker(int64_t ClientStoreMarker)
{
THROW_EXCEPTION(BackupStoreException, ContextIsReadOnly)
}
-
+
mapStoreInfo->SetClientStoreMarker(ClientStoreMarker);
SaveStoreInfo(false /* don't delay saving this */);
}
@@ -1662,7 +1661,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
int64_t targetSearchExcludeFlags = (AllowMoveOverDeletedObject)
?(BackupStoreDirectory::Entry::Flags_Deleted)
:(BackupStoreDirectory::Entry::Flags_EXCLUDE_NOTHING);
-
+
// Special case if the directories are the same...
if(MoveFromDirectory == MoveToDirectory)
{
@@ -1670,16 +1669,16 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
{
// Get the first directory
BackupStoreDirectory &dir(GetDirectoryInternal(MoveFromDirectory));
-
+
// Find the file entry
BackupStoreDirectory::Entry *en = dir.FindEntryByID(ObjectID);
-
+
// Error if not found
if(en == 0)
{
THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
}
-
+
// Check the new name doens't already exist (optionally ignoring deleted files)
{
BackupStoreDirectory::Iterator i(dir);
@@ -1692,7 +1691,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
}
}
}
-
+
// Need to get all the entries with the same name?
if(MoveAllWithSameName)
{
@@ -1713,7 +1712,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
// Just copy this one
en->SetName(rNewFilename);
}
-
+
// Save the directory back
SaveDirectory(dir);
}
@@ -1722,7 +1721,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
RemoveDirectoryFromCache(MoveToDirectory); // either will do, as they're the same
throw;
}
-
+
return;
}
@@ -1732,27 +1731,27 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
// List of entries to move
std::vector<BackupStoreDirectory::Entry *> moving;
-
+
// list of directory IDs which need to have containing dir id changed
std::vector<int64_t> dirsToChangeContainingID;
try
{
// First of all, get copies of the entries to move to the to directory.
-
+
{
// Get the first directory
BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
-
+
// Find the file entry
BackupStoreDirectory::Entry *en = from.FindEntryByID(ObjectID);
-
+
// Error if not found
if(en == 0)
{
THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
}
-
+
// Need to get all the entries with the same name?
if(MoveAllWithSameName)
{
@@ -1765,7 +1764,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
{
// Copy
moving.push_back(new BackupStoreDirectory::Entry(*c));
-
+
// Check for containing directory correction
if(c->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(c->GetObjectID());
}
@@ -1781,13 +1780,13 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
if(en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) dirsToChangeContainingID.push_back(en->GetObjectID());
}
}
-
+
// Secondly, insert them into the to directory, and save it
-
+
{
// To directory
BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
-
+
// Check the new name doens't already exist
{
BackupStoreDirectory::Iterator i(to);
@@ -1800,7 +1799,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
}
}
}
-
+
// Copy the entries into it, changing the name as we go
for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
{
@@ -1808,7 +1807,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
en->SetName(rNewFilename);
to.AddEntry(*en); // adds copy
}
-
+
// Save back
SaveDirectory(to);
}
@@ -1818,57 +1817,57 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
{
// Get directory
BackupStoreDirectory &from(GetDirectoryInternal(MoveFromDirectory));
-
+
// Delete each one
for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
{
from.DeleteEntry((*i)->GetObjectID());
}
-
+
// Save back
SaveDirectory(from);
}
catch(...)
{
// UNDO modification to To directory
-
+
// Get directory
BackupStoreDirectory &to(GetDirectoryInternal(MoveToDirectory));
-
+
// Delete each one
for(std::vector<BackupStoreDirectory::Entry *>::iterator i(moving.begin()); i != moving.end(); ++i)
{
to.DeleteEntry((*i)->GetObjectID());
}
-
+
// Save back
SaveDirectory(to);
// Throw the error
throw;
}
-
+
// Finally... for all the directories we moved, modify their containing directory ID
for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
{
// Load the directory
BackupStoreDirectory &change(GetDirectoryInternal(*i));
-
+
// Modify containing dir ID
change.SetContainerID(MoveToDirectory);
-
+
// Save it back
SaveDirectory(change);
}
}
catch(...)
{
- // Make sure directories aren't in the cache, as they may have been modified
+ // Make sure directories aren't in the cache, as they may have been modified
RemoveDirectoryFromCache(MoveToDirectory);
RemoveDirectoryFromCache(MoveFromDirectory);
for(std::vector<int64_t>::iterator i(dirsToChangeContainingID.begin()); i != dirsToChangeContainingID.end(); ++i)
{
- RemoveDirectoryFromCache(*i);
+ RemoveDirectoryFromCache(*i);
}
while(!moving.empty())
@@ -1877,7 +1876,7 @@ void BackupStoreContext::MoveObject(int64_t ObjectID, int64_t MoveFromDirectory,
moving.pop_back();
}
throw;
- }
+ }
// Clean up
while(!moving.empty())
@@ -1903,7 +1902,7 @@ const BackupStoreInfo &BackupStoreContext::GetBackupStoreInfo() const
{
THROW_EXCEPTION(BackupStoreException, StoreInfoNotLoaded)
}
-
+
return *(mapStoreInfo.get());
}
diff --git a/lib/backupstore/BackupStoreContext.h b/lib/backupstore/BackupStoreContext.h
index 2299c0fd..ffbc1e96 100644
--- a/lib/backupstore/BackupStoreContext.h
+++ b/lib/backupstore/BackupStoreContext.h
@@ -135,12 +135,18 @@ public:
const BackupStoreDirectory &GetDirectory(int64_t ObjectID)
{
// External callers aren't allowed to change it -- this function
- // merely turns the the returned directory const.
+ // merely turns the returned directory const.
return GetDirectoryInternal(ObjectID);
}
// Manipulating files/directories
- int64_t AddFile(IOStream &rFile, int64_t InDirectory, int64_t ModificationTime, int64_t AttributesHash, int64_t DiffFromFileID, const BackupStoreFilename &rFilename, bool MarkFileWithSameNameAsOldVersions);
+ int64_t AddFile(IOStream &rFile,
+ int64_t InDirectory,
+ int64_t ModificationTime,
+ int64_t AttributesHash,
+ int64_t DiffFromFileID,
+ const BackupStoreFilename &rFilename,
+ bool MarkFileWithSameNameAsOldVersions);
int64_t AddDirectory(int64_t InDirectory,
const BackupStoreFilename &rFilename,
const StreamableMemBlock &Attributes,
diff --git a/lib/backupstore/BackupStoreDirectory.cpp b/lib/backupstore/BackupStoreDirectory.cpp
index edef25bc..37957770 100644
--- a/lib/backupstore/BackupStoreDirectory.cpp
+++ b/lib/backupstore/BackupStoreDirectory.cpp
@@ -139,21 +139,21 @@ void BackupStoreDirectory::ReadFromStream(IOStream &rStream, int Timeout)
BOX_FORMAT_HEX32(ntohl(hdr.mMagicValue)) << " in " <<
rStream.ToString());
}
-
+
// Get data
mObjectID = box_ntoh64(hdr.mObjectID);
mContainerID = box_ntoh64(hdr.mContainerID);
mAttributesModTime = box_ntoh64(hdr.mAttributesModTime);
-
+
// Options
int32_t options = ntohl(hdr.mOptionsPresent);
-
+
// Get attributes
mAttributes.ReadFromStream(rStream, Timeout);
-
+
// Decode count
int count = ntohl(hdr.mNumEntries);
-
+
// Clear existing list
for(std::vector<Entry*>::iterator i = mEntries.begin();
i != mEntries.end(); i++)
@@ -161,7 +161,7 @@ void BackupStoreDirectory::ReadFromStream(IOStream &rStream, int Timeout)
delete (*i);
}
mEntries.clear();
-
+
// Read them in!
for(int c = 0; c < count; ++c)
{
@@ -170,7 +170,7 @@ void BackupStoreDirectory::ReadFromStream(IOStream &rStream, int Timeout)
{
// Read from stream
pen->ReadFromStream(rStream, Timeout);
-
+
// Add to list
mEntries.push_back(pen);
}
@@ -180,7 +180,7 @@ void BackupStoreDirectory::ReadFromStream(IOStream &rStream, int Timeout)
throw;
}
}
-
+
// Read in dependency info?
if(options & Option_DependencyInfoPresent)
{
@@ -214,11 +214,11 @@ void BackupStoreDirectory::WriteToStream(IOStream &rStream, int16_t FlagsMustBeS
count++;
}
}
-
+
// Check that sensible IDs have been set
ASSERT(mObjectID != 0);
ASSERT(mContainerID != 0);
-
+
// Need dependency info?
bool dependencyInfoRequired = false;
if(StreamDependencyInfo)
@@ -231,9 +231,9 @@ void BackupStoreDirectory::WriteToStream(IOStream &rStream, int16_t FlagsMustBeS
{
dependencyInfoRequired = true;
}
- }
+ }
}
-
+
// Options
int32_t options = 0;
if(dependencyInfoRequired) options |= Option_DependencyInfoPresent;
@@ -246,10 +246,10 @@ void BackupStoreDirectory::WriteToStream(IOStream &rStream, int16_t FlagsMustBeS
hdr.mContainerID = box_hton64(mContainerID);
hdr.mAttributesModTime = box_hton64(mAttributesModTime);
hdr.mOptionsPresent = htonl(options);
-
+
// Write header
rStream.Write(&hdr, sizeof(hdr));
-
+
// Write the attributes?
if(StreamAttributes)
{
@@ -268,7 +268,7 @@ void BackupStoreDirectory::WriteToStream(IOStream &rStream, int16_t FlagsMustBeS
{
pen->WriteToStream(rStream);
}
-
+
// Write dependency info?
if(dependencyInfoRequired)
{
@@ -277,7 +277,7 @@ void BackupStoreDirectory::WriteToStream(IOStream &rStream, int16_t FlagsMustBeS
while((pen = i.Next(FlagsMustBeSet, FlagsNotToBeSet)) != 0)
{
pen->WriteToStreamDependencyInfo(rStream);
- }
+ }
}
}
@@ -301,7 +301,7 @@ BackupStoreDirectory::Entry *BackupStoreDirectory::AddEntry(const Entry &rEntryT
delete pnew;
throw;
}
-
+
return pnew;
}
@@ -329,7 +329,7 @@ BackupStoreDirectory::AddEntry(const BackupStoreFilename &rName,
delete pnew;
throw;
}
-
+
return pnew;
}
@@ -356,9 +356,11 @@ void BackupStoreDirectory::DeleteEntry(int64_t ObjectID)
return;
}
}
-
+
// Not found
- THROW_EXCEPTION(BackupStoreException, CouldNotFindEntryInDirectory)
+ THROW_EXCEPTION_MESSAGE(BackupStoreException, CouldNotFindEntryInDirectory,
+ "Failed to find entry " << BOX_FORMAT_OBJECTID(ObjectID) <<
+ " in directory " << BOX_FORMAT_OBJECTID(mObjectID));
}
@@ -396,15 +398,16 @@ BackupStoreDirectory::Entry *BackupStoreDirectory::FindEntryByID(int64_t ObjectI
//
// --------------------------------------------------------------------------
BackupStoreDirectory::Entry::Entry()
- : mModificationTime(0),
- mObjectID(0),
- mSizeInBlocks(0),
- mFlags(0),
- mAttributesHash(0),
- mMinMarkNumber(0),
- mMarkNumber(0),
- mDependsNewer(0),
- mDependsOlder(0)
+:
+ mModificationTime(0),
+ mObjectID(0),
+ mSizeInBlocks(0),
+ mFlags(0),
+ mAttributesHash(0),
+ mMinMarkNumber(0),
+ mMarkNumber(0),
+ mDependsNewer(0),
+ mDependsOlder(0)
{
}
@@ -429,17 +432,18 @@ BackupStoreDirectory::Entry::~Entry()
//
// --------------------------------------------------------------------------
BackupStoreDirectory::Entry::Entry(const Entry &rToCopy)
- : mName(rToCopy.mName),
- mModificationTime(rToCopy.mModificationTime),
- mObjectID(rToCopy.mObjectID),
- mSizeInBlocks(rToCopy.mSizeInBlocks),
- mFlags(rToCopy.mFlags),
- mAttributesHash(rToCopy.mAttributesHash),
- mAttributes(rToCopy.mAttributes),
- mMinMarkNumber(rToCopy.mMinMarkNumber),
- mMarkNumber(rToCopy.mMarkNumber),
- mDependsNewer(rToCopy.mDependsNewer),
- mDependsOlder(rToCopy.mDependsOlder)
+:
+ mName(rToCopy.mName),
+ mModificationTime(rToCopy.mModificationTime),
+ mObjectID(rToCopy.mObjectID),
+ mSizeInBlocks(rToCopy.mSizeInBlocks),
+ mFlags(rToCopy.mFlags),
+ mAttributesHash(rToCopy.mAttributesHash),
+ mAttributes(rToCopy.mAttributes),
+ mMinMarkNumber(rToCopy.mMinMarkNumber),
+ mMarkNumber(rToCopy.mMarkNumber),
+ mDependsNewer(rToCopy.mDependsNewer),
+ mDependsOlder(rToCopy.mDependsOlder)
{
}
@@ -453,16 +457,17 @@ BackupStoreDirectory::Entry::Entry(const Entry &rToCopy)
//
// --------------------------------------------------------------------------
BackupStoreDirectory::Entry::Entry(const BackupStoreFilename &rName, box_time_t ModificationTime, int64_t ObjectID, int64_t SizeInBlocks, int16_t Flags, uint64_t AttributesHash)
- : mName(rName),
- mModificationTime(ModificationTime),
- mObjectID(ObjectID),
- mSizeInBlocks(SizeInBlocks),
- mFlags(Flags),
- mAttributesHash(AttributesHash),
- mMinMarkNumber(0),
- mMarkNumber(0),
- mDependsNewer(0),
- mDependsOlder(0)
+:
+ mName(rName),
+ mModificationTime(ModificationTime),
+ mObjectID(ObjectID),
+ mSizeInBlocks(SizeInBlocks),
+ mFlags(Flags),
+ mAttributesHash(AttributesHash),
+ mMinMarkNumber(0),
+ mMarkNumber(0),
+ mDependsNewer(0),
+ mDependsOlder(0)
{
}
@@ -487,11 +492,11 @@ void BackupStoreDirectory::Entry::ReadFromStream(IOStream &rStream, int Timeout)
}
// Do reading first before modifying the variables, to be more exception safe
-
+
// Get the filename
BackupStoreFilename name;
name.ReadFromStream(rStream, Timeout);
-
+
// Get the attributes
mAttributes.ReadFromStream(rStream, Timeout);
@@ -522,13 +527,13 @@ void BackupStoreDirectory::Entry::WriteToStream(IOStream &rStream) const
entry.mSizeInBlocks = box_hton64(mSizeInBlocks);
entry.mAttributesHash = box_hton64(mAttributesHash);
entry.mFlags = htons(mFlags);
-
+
// Write it
rStream.Write(&entry, sizeof(entry));
-
+
// Write the filename
mName.WriteToStream(rStream);
-
+
// Write any attributes
mAttributes.WriteToStream(rStream);
}
@@ -568,12 +573,9 @@ void BackupStoreDirectory::Entry::ReadFromStreamDependencyInfo(IOStream &rStream
void BackupStoreDirectory::Entry::WriteToStreamDependencyInfo(IOStream &rStream) const
{
// Build structure
- en_StreamFormatDepends depends;
+ en_StreamFormatDepends depends;
depends.mDependsNewer = box_hton64(mDependsNewer);
depends.mDependsOlder = box_hton64(mDependsOlder);
// Write
rStream.Write(&depends, sizeof(depends));
}
-
-
-
diff --git a/lib/backupstore/BackupStoreDirectory.h b/lib/backupstore/BackupStoreDirectory.h
index 65c60a83..d9811fe5 100644
--- a/lib/backupstore/BackupStoreDirectory.h
+++ b/lib/backupstore/BackupStoreDirectory.h
@@ -58,7 +58,7 @@ public:
~Entry();
Entry(const Entry &rToCopy);
Entry(const BackupStoreFilename &rName, box_time_t ModificationTime, int64_t ObjectID, int64_t SizeInBlocks, int16_t Flags, uint64_t AttributesHash);
-
+
void ReadFromStream(IOStream &rStream, int Timeout);
void WriteToStream(IOStream &rStream) const;
@@ -148,7 +148,7 @@ public:
StreamableMemBlock mAttributes;
uint32_t mMinMarkNumber;
uint32_t mMarkNumber;
-
+
uint64_t mDependsNewer; // new version this depends on
uint64_t mDependsOlder; // older version which depends on this
};
@@ -196,7 +196,7 @@ public:
: mrDir(rDir), i(rDir.mEntries.begin())
{
}
-
+
BackupStoreDirectory::Entry *Next(int16_t FlagsMustBeSet = Entry::Flags_INCLUDE_EVERYTHING, int16_t FlagsNotToBeSet = Entry::Flags_EXCLUDE_NOTHING)
{
// Skip over things which don't match the required flags
@@ -238,7 +238,7 @@ public:
const BackupStoreDirectory &mrDir;
std::vector<Entry*>::const_iterator i;
};
-
+
friend class Iterator;
class ReverseIterator
@@ -248,7 +248,7 @@ public:
: mrDir(rDir), i(rDir.mEntries.rbegin())
{
}
-
+
BackupStoreDirectory::Entry *Next(int16_t FlagsMustBeSet = Entry::Flags_INCLUDE_EVERYTHING, int16_t FlagsNotToBeSet = Entry::Flags_EXCLUDE_NOTHING)
{
// Skip over things which don't match the required flags
@@ -264,12 +264,12 @@ public:
// Return entry, and increment
return (*(i++));
}
-
+
private:
const BackupStoreDirectory &mrDir;
std::vector<Entry*>::const_reverse_iterator i;
};
-
+
friend class ReverseIterator;
// For recovery of the store
@@ -297,4 +297,3 @@ private:
};
#endif // BACKUPSTOREDIRECTORY__H
-
diff --git a/lib/backupstore/BackupStoreFilenameClear.h b/lib/backupstore/BackupStoreFilenameClear.h
index 595d1158..b7cf555f 100644
--- a/lib/backupstore/BackupStoreFilenameClear.h
+++ b/lib/backupstore/BackupStoreFilenameClear.h
@@ -42,7 +42,7 @@ public:
#endif
void SetClearFilename(const std::string &rToEncode);
- // Setup for encryption of filenames
+ // Setup for encryption of filenames
static void SetBlowfishKey(const void *pKey, int KeyLength, const void *pIV, int IVLength);
static void SetEncodingMethod(int Method);
diff --git a/lib/backupstore/HousekeepStoreAccount.cpp b/lib/backupstore/HousekeepStoreAccount.cpp
index 2bcc59ae..8defcab1 100644
--- a/lib/backupstore/HousekeepStoreAccount.cpp
+++ b/lib/backupstore/HousekeepStoreAccount.cpp
@@ -243,14 +243,14 @@ bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever)
// Go and delete items from the accounts
bool deleteInterrupted = DeleteFiles(*info);
-
+
// If that wasn't interrupted, remove any empty directories which
// are also marked as deleted in their containing directory
if(!deleteInterrupted)
{
deleteInterrupted = DeleteEmptyDirectories(*info);
}
-
+
// Log deletion if anything was deleted
if(mFilesDeleted > 0 || mEmptyDirectoriesDeleted > 0)
{
@@ -272,17 +272,17 @@ bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever)
}
if(mBlocksInOldFilesDelta < (0 - info->GetBlocksInOldFiles()))
{
- mBlocksInOldFilesDelta = (0 - info->GetBlocksInOldFiles());
+ mBlocksInOldFilesDelta = (0 - info->GetBlocksInOldFiles());
}
if(mBlocksInDeletedFilesDelta < (0 - info->GetBlocksInDeletedFiles()))
{
- mBlocksInDeletedFilesDelta = (0 - info->GetBlocksInDeletedFiles());
+ mBlocksInDeletedFilesDelta = (0 - info->GetBlocksInDeletedFiles());
}
if(mBlocksInDirectoriesDelta < (0 - info->GetBlocksInDirectories()))
{
mBlocksInDirectoriesDelta = (0 - info->GetBlocksInDirectories());
}
-
+
// Update the usage counts in the store
info->ChangeBlocksUsed(mBlocksUsedDelta);
info->ChangeBlocksInOldFiles(mBlocksInOldFilesDelta);
@@ -291,7 +291,7 @@ bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever)
// Save the store info back
info->Save();
-
+
// force file to be saved and closed before releasing the lock below
mapNewRefs->Commit();
mapNewRefs.reset();
@@ -358,19 +358,19 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
// Open it.
std::auto_ptr<RaidFileRead> dirStream(RaidFileRead::Open(mStoreDiscSet,
objectFilename));
-
+
// Add the size of the directory on disc to the size being calculated
int64_t originalDirSizeInBlocks = dirStream->GetDiscUsageInBlocks();
mBlocksInDirectories += originalDirSizeInBlocks;
mBlocksUsed += originalDirSizeInBlocks;
-
+
// Read the directory in
BackupStoreDirectory dir;
BufferedStream buf(*dirStream);
dir.ReadFromStream(buf, IOStream::TimeOutInfinite);
dir.SetUserInfo1_SizeInBlocks(originalDirSizeInBlocks);
dirStream->Close();
-
+
// Is it empty?
if(dir.GetNumberOfEntries() == 0)
{
@@ -415,7 +415,7 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
// flag as having done something
deletedSomething = true;
-
+
// Must start the loop from the beginning again, as iterator is now
// probably invalid.
break;
@@ -423,7 +423,7 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
}
} while(deletedSomething);
}
-
+
// BLOCK
{
// Add files to the list of potential deletions
@@ -443,9 +443,9 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
int16_t enFlags = en->GetFlags();
int64_t enSizeInBlocks = en->GetSizeInBlocks();
mBlocksUsed += enSizeInBlocks;
- if(enFlags & BackupStoreDirectory::Entry::Flags_OldVersion) mBlocksInOldFiles += enSizeInBlocks;
- if(enFlags & BackupStoreDirectory::Entry::Flags_Deleted) mBlocksInDeletedFiles += enSizeInBlocks;
-
+ if(en->IsOld()) mBlocksInOldFiles += enSizeInBlocks;
+ if(en->IsDeleted()) mBlocksInDeletedFiles += enSizeInBlocks;
+
// Work out ages of this version from the last mark
int32_t enVersionAge = 0;
std::map<version_t, int32_t>::iterator enVersionAgeI(
@@ -462,9 +462,9 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
markVersionAges[version_t(en->GetName().GetEncodedFilename(), en->GetMarkNumber())] = enVersionAge;
}
// enVersionAge is now the age of this version.
-
+
// Potentially add it to the list if it's deleted, if it's an old version or deleted
- if((enFlags & (BackupStoreDirectory::Entry::Flags_Deleted | BackupStoreDirectory::Entry::Flags_OldVersion)) != 0)
+ if(en->IsOld() || en->IsDeleted())
{
// Is deleted / old version.
DelEn d;
@@ -473,17 +473,15 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
d.mSizeInBlocks = en->GetSizeInBlocks();
d.mMarkNumber = en->GetMarkNumber();
d.mVersionAgeWithinMark = enVersionAge;
- d.mIsFlagDeleted = (enFlags &
- BackupStoreDirectory::Entry::Flags_Deleted)
- ? true : false;
-
+ d.mIsFlagDeleted = en->IsDeleted();
+
// Add it to the list
mPotentialDeletions.insert(d);
-
+
// Update various counts
mPotentialDeletionsTotalSize += d.mSizeInBlocks;
if(d.mSizeInBlocks > mMaxSizeInPotentialDeletions) mMaxSizeInPotentialDeletions = d.mSizeInBlocks;
-
+
// Too much in the list of potential deletions?
// (check against the deletion target + the max size in deletions, so that we never delete things
// and take the total size below the deletion size target)
@@ -491,7 +489,7 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
{
int64_t sizeToRemove = mPotentialDeletionsTotalSize - (mDeletionSizeTarget + mMaxSizeInPotentialDeletions);
bool recalcMaxSize = false;
-
+
while(sizeToRemove > 0)
{
// Make iterator for the last element, while checking that there's something there in the first place.
@@ -503,7 +501,7 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
}
// Make this into an iterator pointing to the last element in the set
--i;
-
+
// Delete this one?
if(sizeToRemove > i->mSizeInBlocks)
{
@@ -521,7 +519,7 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
break;
}
}
-
+
if(recalcMaxSize)
{
// Because an object which was the maximum size recorded was deleted from the set
@@ -541,14 +539,13 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
}
}
+ // Recurse into subdirectories
{
- // Recurse into subdirectories
BackupStoreDirectory::Iterator i(dir);
BackupStoreDirectory::Entry *en = 0;
while((en = i.Next(BackupStoreDirectory::Entry::Flags_Dir)) != 0)
{
- // Next level
- ASSERT((en->GetFlags() & BackupStoreDirectory::Entry::Flags_Dir) == BackupStoreDirectory::Entry::Flags_Dir);
+ ASSERT(en->IsDir());
if(!ScanDirectory(en->GetObjectID(), rBackupStoreInfo))
{
@@ -557,7 +554,7 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID,
}
}
}
-
+
return true;
}
@@ -578,7 +575,7 @@ bool HousekeepStoreAccount::DelEnCompare::operator()(const HousekeepStoreAccount
// The sort order here is intended to preserve the entries of most value, that is, the newest objects
// which are on a mark boundary.
-
+
// Reverse order age, so oldest goes first
if(x.mVersionAgeWithinMark > y.mVersionAgeWithinMark)
{
@@ -650,7 +647,7 @@ bool HousekeepStoreAccount::DeleteFiles(BackupStoreInfo& rBackupStoreInfo)
dir.ReadFromStream(*dirStream, IOStream::TimeOutInfinite);
dir.SetUserInfo1_SizeInBlocks(dirStream->GetDiscUsageInBlocks());
}
-
+
// Delete the file
BackupStoreRefCountDatabase::refcount_t refs =
DeleteFile(i->mInDirectory, i->mObjectID, dir,
@@ -729,7 +726,7 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
mErrorCount++;
return refs;
}
-
+
// Record the flags it's got set
wasDeleted = pentry->IsDeleted();
wasOldVersion = pentry->IsOld();
@@ -739,7 +736,7 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
			// Things changed size since we were last around
return refs;
}
-
+
// Record size
deletedFileSizeInBlocks = pentry->GetSizeInBlocks();
@@ -794,7 +791,7 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
else
{
// This entry is in the middle of a chain, and two patches need combining.
-
+
// First, adjust the directory entries
BackupStoreDirectory::Entry *pnewer = rDirectory.FindEntryByID(pentry->GetDependsNewer());
if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID
@@ -806,7 +803,7 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
pnewer->SetDependsOlder(pentry->GetDependsOlder());
polder->SetDependsNewer(pentry->GetDependsNewer());
}
-
+
// COMMON CODE to both cases
// Generate the filename of the older version
@@ -835,7 +832,7 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
BackupStoreFile::CombineDiffs(*pobjectBeingDeleted, *pdiff, *pdiff2, *padjustedEntry);
}
	// The file will be committed later when the directory is safely committed.
-
+
// Work out the adjusted size
int64_t newSize = padjustedEntry->GetDiscUsageInBlocks();
int64_t sizeDelta = newSize - polder->GetSizeInBlocks();
@@ -850,13 +847,13 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
}
polder->SetSizeInBlocks(newSize);
}
-
+
// pentry no longer valid
}
-
+
// Delete it from the directory
rDirectory.DeleteEntry(ObjectID);
-
+
// Save directory back to disc
// BLOCK
{
@@ -884,7 +881,7 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
if(padjustedEntry.get() != 0)
{
padjustedEntry->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
- padjustedEntry.reset(); // delete it now
+ padjustedEntry.reset(); // delete it now
}
// Drop reference count by one. Must now be zero, to delete the file.
@@ -913,7 +910,7 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
mBlocksInOldFilesDelta -= deletedFileSizeInBlocks;
rBackupStoreInfo.AdjustNumOldFiles(-1);
}
-
+
// Delete the directory?
	// Do this if... dir has zero entries, and is marked as deleted in its containing directory
if(rDirectory.GetNumberOfEntries() == 0)
@@ -1040,7 +1037,7 @@ bool HousekeepStoreAccount::DeleteEmptyDirectories(BackupStoreInfo& rBackupStore
// Swap in new, so it's examined next time round
mEmptyDirectories.swap(toExamine);
}
-
+
// Not interrupted
return false;
}
diff --git a/lib/backupstore/backupprotocol.txt b/lib/backupstore/backupprotocol.txt
index a151df9b..5921d009 100644
--- a/lib/backupstore/backupprotocol.txt
+++ b/lib/backupstore/backupprotocol.txt
@@ -159,6 +159,7 @@ ChangeDirAttributes 22 Command(Success) StreamWithCommand
DeleteDirectory 23 Command(Success)
int64 ObjectID
+
UndeleteDirectory 24 Command(Success)
int64 ObjectID
	# may not have exactly the desired effect if files within it have been deleted before the directory was deleted.