summaryrefslogtreecommitdiff
path: root/lib/backupstore/BackupStoreCheck2.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'lib/backupstore/BackupStoreCheck2.cpp')
-rw-r--r--lib/backupstore/BackupStoreCheck2.cpp175
1 files changed, 94 insertions, 81 deletions
diff --git a/lib/backupstore/BackupStoreCheck2.cpp b/lib/backupstore/BackupStoreCheck2.cpp
index 90e21e7f..13831a09 100644
--- a/lib/backupstore/BackupStoreCheck2.cpp
+++ b/lib/backupstore/BackupStoreCheck2.cpp
@@ -20,6 +20,7 @@
#include "BackupStoreFileWire.h"
#include "BackupStoreInfo.h"
#include "BackupStoreObjectMagic.h"
+#include "BackupStoreRefCountDatabase.h"
#include "MemBlockStream.h"
#include "RaidFileRead.h"
#include "RaidFileWrite.h"
@@ -40,7 +41,7 @@ void BackupStoreCheck::CheckRoot()
{
int32_t index = 0;
IDBlock *pblock = LookupID(BACKUPSTORE_ROOT_DIRECTORY_ID, index);
-
+
if(pblock != 0)
{
// Found it. Which is lucky. Mark it as contained.
@@ -49,9 +50,9 @@ void BackupStoreCheck::CheckRoot()
else
{
BOX_WARNING("Root directory doesn't exist");
-
+
++mNumberErrorsFound;
-
+
if(mFixErrors)
{
// Create a new root directory
@@ -78,7 +79,7 @@ void BackupStoreCheck::CreateBlankDirectory(int64_t DirectoryID, int64_t Contain
}
BackupStoreDirectory dir(DirectoryID, ContainingDirID);
-
+
// Serialise to disc
std::string filename;
StoreStructure::MakeObjectFilename(DirectoryID, mStoreRoot, mDiscSetNumber, filename, true /* make sure the dir exists */);
@@ -87,10 +88,10 @@ void BackupStoreCheck::CreateBlankDirectory(int64_t DirectoryID, int64_t Contain
dir.WriteToStream(obj);
int64_t size = obj.GetDiscUsageInBlocks();
obj.Commit(true /* convert to raid now */);
-
+
// Record the fact we've done this
mDirsAdded.insert(DirectoryID);
-
+
// Add to sizes
mBlocksUsed += size;
mBlocksInDirectories += size;
@@ -131,15 +132,16 @@ void BackupStoreCheck::CheckUnattachedObjects()
{
IDBlock *pblock = i->second;
int32_t bentries = (pblock == mpInfoLastBlock)?mInfoLastBlockEntries:BACKUPSTORECHECK_BLOCK_SIZE;
-
+
for(int e = 0; e < bentries; ++e)
{
uint8_t flags = GetFlags(pblock, e);
if((flags & Flags_IsContained) == 0)
{
// Unattached object...
+ int64_t ObjectID = pblock->mID[e];
BOX_ERROR("Object " <<
- BOX_FORMAT_OBJECTID(pblock->mID[e]) <<
+ BOX_FORMAT_OBJECTID(ObjectID) <<
" is unattached.");
++mNumberErrorsFound;
@@ -149,6 +151,8 @@ void BackupStoreCheck::CheckUnattachedObjects()
if((flags & Flags_IsDir) == Flags_IsDir)
{
// Directory. Just put into lost and found.
+ // (It doesn't contain its filename, so we
+ // can't recreate the entry in the parent)
putIntoDirectoryID = GetLostAndFoundDirID();
}
else
@@ -157,7 +161,9 @@ void BackupStoreCheck::CheckUnattachedObjects()
{
int64_t diffFromObjectID = 0;
std::string filename;
- StoreStructure::MakeObjectFilename(pblock->mID[e], mStoreRoot, mDiscSetNumber, filename, false /* don't attempt to make sure the dir exists */);
+ StoreStructure::MakeObjectFilename(ObjectID,
+ mStoreRoot, mDiscSetNumber, filename,
+ false /* don't attempt to make sure the dir exists */);
// The easiest way to do this is to verify it again. Not such a bad penalty, because
// this really shouldn't be done very often.
@@ -170,20 +176,22 @@ void BackupStoreCheck::CheckUnattachedObjects()
// Just delete it to be safe.
if(diffFromObjectID != 0)
{
- BOX_WARNING("Object " << BOX_FORMAT_OBJECTID(pblock->mID[e]) << " is unattached, and is a patch. Deleting, cannot reliably recover.");
-
+ BOX_WARNING("Object " << BOX_FORMAT_OBJECTID(ObjectID) << " is unattached, and is a patch. Deleting, cannot reliably recover.");
+
// Delete this object instead
if(mFixErrors)
{
RaidFileWrite del(mDiscSetNumber, filename);
del.Delete();
}
-
+
+ mBlocksUsed -= pblock->mObjectSizeInBlocks[e];
+
// Move on to next item
continue;
}
}
-
+
// Files contain their original filename, so perhaps the original directory still exists,
// or we can infer the existence of a directory?
// Look for a matching entry in the mDirsWhichContainLostDirs map.
@@ -249,9 +257,10 @@ void BackupStoreCheck::CheckUnattachedObjects()
}
// Add it to the directory
- pFixer->InsertObject(pblock->mID[e],
+ pFixer->InsertObject(ObjectID,
((flags & Flags_IsDir) == Flags_IsDir),
lostDirNameSerial);
+ mapNewRefs->AddReference(ObjectID);
}
}
}
@@ -284,7 +293,7 @@ bool BackupStoreCheck::TryToRecreateDirectory(int64_t MissingDirectoryID)
// Not a missing directory, can't recreate.
return false;
}
-
+
// Can recreate this! Wooo!
if(!mFixErrors)
{
@@ -297,12 +306,12 @@ bool BackupStoreCheck::TryToRecreateDirectory(int64_t MissingDirectoryID)
BOX_WARNING("Recreating missing directory " <<
BOX_FORMAT_OBJECTID(MissingDirectoryID));
-
+
// Create a blank directory
BackupStoreDirectory dir(MissingDirectoryID, missing->second /* containing dir ID */);
// Note that this directory already contains a directory entry pointing to
// this dir, so it doesn't have to be added.
-
+
// Serialise to disc
std::string filename;
StoreStructure::MakeObjectFilename(MissingDirectoryID, mStoreRoot, mDiscSetNumber, filename, true /* make sure the dir exists */);
@@ -310,10 +319,10 @@ bool BackupStoreCheck::TryToRecreateDirectory(int64_t MissingDirectoryID)
root.Open(false /* don't allow overwriting */);
dir.WriteToStream(root);
root.Commit(true /* convert to raid now */);
-
+
// Record the fact we've done this
mDirsAdded.insert(MissingDirectoryID);
-
+
// Remove the entry from the map, so this doesn't happen again
mDirsWhichContainLostDirs.erase(missing);
@@ -328,7 +337,7 @@ BackupStoreDirectoryFixer::BackupStoreDirectoryFixer(std::string storeRoot,
// Generate filename
StoreStructure::MakeObjectFilename(ID, mStoreRoot, mDiscSetNumber,
mFilename, false /* don't make sure the dir exists */);
-
+
// Read it in
std::auto_ptr<RaidFileRead> file(
RaidFileRead::Open(mDiscSetNumber, mFilename));
@@ -347,7 +356,7 @@ void BackupStoreDirectoryFixer::InsertObject(int64_t ObjectID, bool IsDirectory,
{
// Directory -- simply generate a name for it.
char name[32];
- ::sprintf(name, "dir%08x", lostDirNameSerial);
+ ::snprintf(name, sizeof(name), "dir%08x", lostDirNameSerial);
objectStoreFilename.SetAsClearFilename(name);
}
else
@@ -370,7 +379,7 @@ void BackupStoreDirectoryFixer::InsertObject(int64_t ObjectID, bool IsDirectory,
(ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V1
#ifndef BOX_DISABLE_BACKWARDS_COMPATIBILITY_BACKUPSTOREFILE
&& ntohl(hdr.mMagicValue) != OBJECTMAGIC_FILE_MAGIC_VALUE_V0
-#endif
+#endif
))
{
// This should never happen, everything has been
@@ -393,7 +402,7 @@ BackupStoreDirectoryFixer::~BackupStoreDirectoryFixer()
{
// Fix any flags which have been broken, which there's a good chance of doing
mDirectory.CheckAndFix();
-
+
// Write it out
RaidFileWrite root(mDiscSetNumber, mFilename);
root.Open(true /* allow overwriting */);
@@ -438,7 +447,7 @@ int64_t BackupStoreCheck::GetLostAndFoundDirID()
while(true)
{
char name[32];
- ::sprintf(name, "lost+found%d", n++);
+ ::snprintf(name, sizeof(name), "lost+found%d", n++);
lostAndFound.SetAsClearFilename(name);
if(!dir.NameInUse(lostAndFound))
{
@@ -453,7 +462,7 @@ int64_t BackupStoreCheck::GetLostAndFoundDirID()
// Create a blank directory
CreateBlankDirectory(id, BACKUPSTORE_ROOT_DIRECTORY_ID);
-
+
// Add an entry for it
dir.AddEntry(lostAndFound, 0, id, 0, BackupStoreDirectory::Entry::Flags_Dir, 0);
@@ -462,7 +471,7 @@ int64_t BackupStoreCheck::GetLostAndFoundDirID()
root.Open(true /* allow overwriting */);
dir.WriteToStream(root);
root.Commit(true /* convert to raid now */);
-
+
// Store
mLostAndFoundDirectoryID = id;
@@ -494,7 +503,7 @@ void BackupStoreCheck::FixDirsWithWrongContainerID()
int32_t index = 0;
IDBlock *pblock = LookupID(*i, index);
if(pblock == 0) continue;
-
+
// Load in
BackupStoreDirectory dir;
std::string filename;
@@ -506,7 +515,7 @@ void BackupStoreCheck::FixDirsWithWrongContainerID()
// Adjust container ID
dir.SetContainerID(pblock->mContainer[index]);
-
+
// Write it out
RaidFileWrite root(mDiscSetNumber, filename);
root.Open(true /* allow overwriting */);
@@ -539,7 +548,7 @@ void BackupStoreCheck::FixDirsWithLostDirs()
int32_t index = 0;
IDBlock *pblock = LookupID(i->second, index);
if(pblock == 0) continue;
-
+
// Load in
BackupStoreDirectory dir;
std::string filename;
@@ -551,10 +560,10 @@ void BackupStoreCheck::FixDirsWithLostDirs()
// Delete the dodgy entry
dir.DeleteEntry(i->first);
-
+
// Fix it up
dir.CheckAndFix();
-
+
// Write it out
RaidFileWrite root(mDiscSetNumber, filename);
root.Open(true /* allow overwriting */);
@@ -587,49 +596,42 @@ void BackupStoreCheck::WriteNewStoreInfo()
++mNumberErrorsFound;
}
- BOX_NOTICE("Total files: " << mNumFiles << " (of which "
+ BOX_INFO("Current files: " << mNumCurrentFiles << ", "
"old files: " << mNumOldFiles << ", "
- "deleted files: " << mNumDeletedFiles << "), "
+ "deleted files: " << mNumDeletedFiles << ", "
"directories: " << mNumDirectories);
- // Minimum soft and hard limits
+ // Minimum soft and hard limits to ensure that nothing gets deleted
+ // by housekeeping.
int64_t minSoft = ((mBlocksUsed * 11) / 10) + 1024;
int64_t minHard = ((minSoft * 11) / 10) + 1024;
- // Need to do anything?
- if(pOldInfo.get() != 0 &&
- mNumberErrorsFound == 0 &&
- pOldInfo->GetAccountID() == mAccountID)
- {
- // Leave the store info as it is, no need to alter it because nothing really changed,
- // and the only essential thing was that the account ID was correct, which is was.
- return;
- }
-
- // NOTE: We will always build a new store info, so the client store marker gets changed.
+ int64_t softLimit = pOldInfo.get() ? pOldInfo->GetBlocksSoftLimit() : minSoft;
+ int64_t hardLimit = pOldInfo.get() ? pOldInfo->GetBlocksHardLimit() : minHard;
- // Work out the new limits
- int64_t softLimit = minSoft;
- int64_t hardLimit = minHard;
- if(pOldInfo.get() != 0 && pOldInfo->GetBlocksSoftLimit() > minSoft)
+ if(mNumberErrorsFound && pOldInfo.get())
{
- softLimit = pOldInfo->GetBlocksSoftLimit();
- }
- else
- {
- BOX_WARNING("Soft limit for account changed to ensure "
- "housekeeping doesn't delete files on next run.");
- }
- if(pOldInfo.get() != 0 && pOldInfo->GetBlocksHardLimit() > minHard)
- {
- hardLimit = pOldInfo->GetBlocksHardLimit();
- }
- else
- {
- BOX_WARNING("Hard limit for account changed to ensure "
- "housekeeping doesn't delete files on next run.");
+ if(pOldInfo->GetBlocksSoftLimit() > minSoft)
+ {
+ softLimit = pOldInfo->GetBlocksSoftLimit();
+ }
+ else
+ {
+ BOX_WARNING("Soft limit for account changed to ensure "
+ "housekeeping doesn't delete files on next run.");
+ }
+
+ if(pOldInfo->GetBlocksHardLimit() > minHard)
+ {
+ hardLimit = pOldInfo->GetBlocksHardLimit();
+ }
+ else
+ {
+ BOX_WARNING("Hard limit for account changed to ensure "
+ "housekeeping doesn't delete files on next run.");
+ }
}
-
+
// Object ID
int64_t lastObjID = mLastIDInInfo;
if(mLostAndFoundDirectoryID != 0)
@@ -662,11 +664,24 @@ void BackupStoreCheck::WriteNewStoreInfo()
hardLimit,
(pOldInfo.get() ? pOldInfo->IsAccountEnabled() : true),
*extra_data));
- info->AdjustNumFiles(mNumFiles);
+ info->AdjustNumCurrentFiles(mNumCurrentFiles);
info->AdjustNumOldFiles(mNumOldFiles);
info->AdjustNumDeletedFiles(mNumDeletedFiles);
info->AdjustNumDirectories(mNumDirectories);
+ // If there are any errors (apart from wrong block counts), then we
+ // should reset the ClientStoreMarker to zero, which
+ // CreateForRegeneration does. But if there are no major errors, then
+ // we should maintain the old ClientStoreMarker, to avoid invalidating
+ // the client's directory cache.
+ if (pOldInfo.get() && !mNumberErrorsFound)
+ {
+ BOX_INFO("No major errors found, preserving old "
+ "ClientStoreMarker: " <<
+ pOldInfo->GetClientStoreMarker());
+ info->SetClientStoreMarker(pOldInfo->GetClientStoreMarker());
+ }
+
if(pOldInfo.get())
{
mNumberErrorsFound += info->ReportChangesTo(*pOldInfo);
@@ -676,7 +691,7 @@ void BackupStoreCheck::WriteNewStoreInfo()
if(mFixErrors)
{
info->Save();
- BOX_NOTICE("New store info file written successfully.");
+ BOX_INFO("New store info file written successfully.");
}
}
@@ -695,7 +710,7 @@ void BackupStoreCheck::WriteNewStoreInfo()
bool BackupStoreDirectory::CheckAndFix()
{
bool changed = false;
-
+
// Check that if a file depends on a new version, that version is in this directory
bool restart;
@@ -718,11 +733,11 @@ bool BackupStoreDirectory::CheckAndFix()
"on newer version " <<
FMT_OID(dependsNewer) <<
" which doesn't exist");
-
+
// Remove
delete *i;
mEntries.erase(i);
-
+
// Mark as changed
changed = true;
@@ -751,7 +766,7 @@ bool BackupStoreDirectory::CheckAndFix()
}
}
while(restart);
-
+
// Check that if a file has a dependency marked, it exists, and remove it if it doesn't
{
std::vector<Entry*>::iterator i(mEntries.begin());
@@ -768,7 +783,7 @@ bool BackupStoreDirectory::CheckAndFix()
"info cleared");
(*i)->SetDependsOlder(0);
-
+
// Mark as changed
changed = true;
}
@@ -780,7 +795,7 @@ bool BackupStoreDirectory::CheckAndFix()
{
// Reset change marker
ch = false;
-
+
// Search backwards -- so see newer versions first
std::vector<Entry*>::iterator i(mEntries.end());
if(i == mEntries.begin())
@@ -806,10 +821,8 @@ bool BackupStoreDirectory::CheckAndFix()
}
else
{
- bool isDir = (((*i)->GetFlags() & Entry::Flags_Dir) == Entry::Flags_Dir);
-
// Check mutually exclusive flags
- if(isDir && (((*i)->GetFlags() & Entry::Flags_File) == Entry::Flags_File))
+ if((*i)->IsDir() && (*i)->IsFile())
{
// Bad! Unset the file flag
BOX_TRACE("Entry " << FMT_i <<
@@ -863,29 +876,29 @@ bool BackupStoreDirectory::CheckAndFix()
}
}
}
-
+
if(removeEntry)
{
// Mark something as changed, in loop
ch = true;
-
+
// Mark something as globally changed
changed = true;
-
+
// erase the thing from the list
Entry *pentry = (*i);
mEntries.erase(i);
// And delete the entry object
delete pentry;
-
+
// Stop going around this loop, as the iterator is now invalid
break;
}
} while(i != mEntries.begin());
} while(ch != false);
-
+
return changed;
}