diff --git a/src/internal/kopia/backup_bases.go b/src/internal/kopia/backup_bases.go index 18624236b..a360ef577 100644 --- a/src/internal/kopia/backup_bases.go +++ b/src/internal/kopia/backup_bases.go @@ -5,8 +5,10 @@ import ( "github.com/alcionai/clues" "github.com/kopia/kopia/repo/manifest" + "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/backup/identity" "github.com/alcionai/corso/src/pkg/logger" @@ -35,15 +37,16 @@ type BackupBases interface { // and assist bases. If DisableAssistBases has been called then it returns // nil. SnapshotAssistBases() []ManifestEntry + + // TODO(ashmrtn): Remove other functions and just have these once other code + // is updated. Here for now so changes in this file can be made. + NewMergeBases() []BackupBase + NewUniqueAssistBases() []BackupBase } type backupBases struct { - // backups and mergeBases should be modified together as they relate similar - // data. - backups []BackupEntry - mergeBases []ManifestEntry - assistBackups []BackupEntry - assistBases []ManifestEntry + mergeBases []BackupBase + assistBases []BackupBase // disableAssistBases denote whether any assist bases should be returned to // kopia during snapshot operation. @@ -55,48 +58,50 @@ func (bb *backupBases) SnapshotAssistBases() []ManifestEntry { return nil } + res := []ManifestEntry{} + + for _, ab := range bb.assistBases { + res = append(res, ManifestEntry{ + Manifest: ab.ItemDataSnapshot, + Reasons: ab.Reasons, + }) + } + + for _, mb := range bb.mergeBases { + res = append(res, ManifestEntry{ + Manifest: mb.ItemDataSnapshot, + Reasons: mb.Reasons, + }) + } + // Need to use the actual variables here because the functions will return nil // depending on what's been marked as disabled. - return append(slices.Clone(bb.assistBases), bb.mergeBases...) + return res } func (bb *backupBases) ConvertToAssistBase(manifestID manifest.ID) { - var ( - snapshotMan ManifestEntry - base BackupEntry - snapFound bool - ) - idx := slices.IndexFunc( bb.mergeBases, - func(man ManifestEntry) bool { - return man.ID == manifestID + func(base BackupBase) bool { + return base.ItemDataSnapshot.ID == manifestID }) if idx >= 0 { - snapFound = true - snapshotMan = bb.mergeBases[idx] + bb.assistBases = append(bb.assistBases, bb.mergeBases[idx]) bb.mergeBases = slices.Delete(bb.mergeBases, idx, idx+1) } - - idx = slices.IndexFunc( - bb.backups, - func(bup BackupEntry) bool { - return bup.SnapshotID == string(manifestID) - }) - if idx >= 0 { - base = bb.backups[idx] - bb.backups = slices.Delete(bb.backups, idx, idx+1) - } - - // Account for whether we found the backup. 
- if idx >= 0 && snapFound { - bb.assistBackups = append(bb.assistBackups, base) - bb.assistBases = append(bb.assistBases, snapshotMan) - } } func (bb backupBases) Backups() []BackupEntry { - return slices.Clone(bb.backups) + res := []BackupEntry{} + + for _, mb := range bb.mergeBases { + res = append(res, BackupEntry{ + Backup: mb.Backup, + Reasons: mb.Reasons, + }) + } + + return res } func (bb backupBases) UniqueAssistBackups() []BackupEntry { @@ -104,7 +109,16 @@ func (bb backupBases) UniqueAssistBackups() []BackupEntry { return nil } - return slices.Clone(bb.assistBackups) + res := []BackupEntry{} + + for _, ab := range bb.assistBases { + res = append(res, BackupEntry{ + Backup: ab.Backup, + Reasons: ab.Reasons, + }) + } + + return res } func (bb *backupBases) MinBackupVersion() int { @@ -114,9 +128,9 @@ func (bb *backupBases) MinBackupVersion() int { return min } - for _, bup := range bb.backups { - if min == version.NoBackup || bup.Version < min { - min = bup.Version + for _, base := range bb.mergeBases { + if min == version.NoBackup || base.Backup.Version < min { + min = base.Backup.Version } } @@ -124,6 +138,19 @@ func (bb *backupBases) MinBackupVersion() int { } func (bb backupBases) MergeBases() []ManifestEntry { + res := []ManifestEntry{} + + for _, mb := range bb.mergeBases { + res = append(res, ManifestEntry{ + Manifest: mb.ItemDataSnapshot, + Reasons: mb.Reasons, + }) + } + + return res +} + +func (bb backupBases) NewMergeBases() []BackupBase { return slices.Clone(bb.mergeBases) } @@ -134,10 +161,8 @@ func (bb *backupBases) DisableMergeBases() { // in the merge set since then we won't return the bases when merging backup // details. bb.assistBases = append(bb.assistBases, bb.mergeBases...) - bb.assistBackups = append(bb.assistBackups, bb.backups...) bb.mergeBases = nil - bb.backups = nil } func (bb backupBases) UniqueAssistBases() []ManifestEntry { @@ -145,6 +170,23 @@ func (bb backupBases) UniqueAssistBases() []ManifestEntry { return nil } + res := []ManifestEntry{} + + for _, ab := range bb.assistBases { + res = append(res, ManifestEntry{ + Manifest: ab.ItemDataSnapshot, + Reasons: ab.Reasons, + }) + } + + return res +} + +func (bb backupBases) NewUniqueAssistBases() []BackupBase { + if bb.disableAssistBases { + return nil + } + return slices.Clone(bb.assistBases) } @@ -152,6 +194,36 @@ func (bb *backupBases) DisableAssistBases() { bb.disableAssistBases = true } +func getMissingBases( + reasonToKey func(identity.Reasoner) string, + seen map[string]struct{}, + toCheck []BackupBase, +) []BackupBase { + var res []BackupBase + + for _, base := range toCheck { + useReasons := []identity.Reasoner{} + + for _, r := range base.Reasons { + k := reasonToKey(r) + if _, ok := seen[k]; ok { + // This Reason is already "covered" by a previously seen base. Skip + // adding the Reason to the base being examined. + continue + } + + useReasons = append(useReasons, r) + } + + if len(useReasons) > 0 { + base.Reasons = useReasons + res = append(res, base) + } + } + + return res +} + // MergeBackupBases reduces the two BackupBases into a single BackupBase. 
// Assumes the passed in BackupBases represents a prior backup version (across // some migration that disrupts lookup), and that the BackupBases used to call @@ -178,19 +250,23 @@ func (bb *backupBases) MergeBackupBases( other BackupBases, reasonToKey func(reason identity.Reasoner) string, ) BackupBases { - if other == nil || (len(other.MergeBases()) == 0 && len(other.UniqueAssistBases()) == 0) { + if other == nil || (len(other.NewMergeBases()) == 0 && len(other.NewUniqueAssistBases()) == 0) { return bb } - if bb == nil || (len(bb.MergeBases()) == 0 && len(bb.UniqueAssistBases()) == 0) { + if bb == nil || (len(bb.NewMergeBases()) == 0 && len(bb.NewUniqueAssistBases()) == 0) { return other } toMerge := map[string]struct{}{} assist := map[string]struct{}{} - // Track the bases in bb. - for _, m := range bb.mergeBases { + // Track the bases in bb. We need to know the Reason(s) covered by merge bases + // and the Reason(s) covered by assist bases separately because the former + // dictates whether we need to select a merge base and an assist base from + // other while the latter dictates whether we need to select an assist base + // from other. + for _, m := range bb.MergeBases() { for _, r := range m.Reasons { k := reasonToKey(r) @@ -199,248 +275,214 @@ func (bb *backupBases) MergeBackupBases( } } - for _, m := range bb.assistBases { + for _, m := range bb.UniqueAssistBases() { for _, r := range m.Reasons { k := reasonToKey(r) assist[k] = struct{}{} } } - var toAdd []ManifestEntry - - // Calculate the set of mergeBases to pull from other into this one. - for _, m := range other.MergeBases() { - useReasons := []identity.Reasoner{} - - for _, r := range m.Reasons { - k := reasonToKey(r) - if _, ok := toMerge[k]; ok { - // Assume other contains prior manifest versions. - // We don't want to stack a prior version incomplete onto - // a current version's complete snapshot. - continue - } - - useReasons = append(useReasons, r) - } - - if len(useReasons) > 0 { - m.Reasons = useReasons - toAdd = append(toAdd, m) - } - } + addMerge := getMissingBases(reasonToKey, toMerge, other.NewMergeBases()) + addAssist := getMissingBases(reasonToKey, assist, other.NewUniqueAssistBases()) res := &backupBases{ - backups: bb.Backups(), - mergeBases: bb.MergeBases(), - assistBases: bb.UniqueAssistBases(), - // Note that assistBackups are a new feature and don't exist - // in prior versions where we were using UPN based reasons i.e. - // other won't have any assistBackups. - assistBackups: bb.UniqueAssistBackups(), - } - - // Add new mergeBases and backups. - for _, man := range toAdd { - // Will get empty string if not found which is fine, it'll fail one of the - // other checks. - bID, _ := man.GetTag(TagBackupID) - - bup, ok := getBackupByID(other.Backups(), bID) - if !ok { - logger.Ctx(ctx).Infow( - "not unioning snapshot missing backup", - "other_manifest_id", man.ID, - "other_backup_id", bID) - - continue - } - - bup.Reasons = man.Reasons - - res.backups = append(res.backups, bup) - res.mergeBases = append(res.mergeBases, man) + mergeBases: append(addMerge, bb.NewMergeBases()...), + assistBases: append(addAssist, bb.NewUniqueAssistBases()...), } return res } -func findNonUniqueManifests( +func fixupMinRequirements( ctx context.Context, - manifests []ManifestEntry, -) map[manifest.ID]struct{} { - // ReasonKey -> manifests with that reason. 
- reasons := map[string][]ManifestEntry{} - toDrop := map[manifest.ID]struct{}{} + baseSet []BackupBase, +) []BackupBase { + res := make([]BackupBase, 0, len(baseSet)) - for _, man := range manifests { - // Incomplete snapshots are used only for kopia-assisted incrementals. The - // fact that we need this check here makes it seem like this should live in - // the kopia code. However, keeping it here allows for better debugging as - // the kopia code only has access to a path builder which means it cannot - // remove the resource owner from the error/log output. That is also below - // the point where we decide if we should do a full backup or an incremental. - if len(man.IncompleteReason) > 0 { - logger.Ctx(ctx).Infow( - "dropping incomplete manifest", - "manifest_id", man.ID) + for _, base := range baseSet { + var ( + backupID model.StableID + snapID manifest.ID + snapIncomplete bool + deetsID string + ) - toDrop[man.ID] = struct{}{} + if base.Backup != nil { + backupID = base.Backup.ID - continue - } - - for _, reason := range man.Reasons { - mapKey := reasonKey(reason) - reasons[mapKey] = append(reasons[mapKey], man) - } - } - - for reason, mans := range reasons { - ictx := clues.Add(ctx, "reason", reason) - - if len(mans) == 0 { - // Not sure how this would happen but just in case... - continue - } else if len(mans) > 1 { - mIDs := make([]manifest.ID, 0, len(mans)) - for _, m := range mans { - toDrop[m.ID] = struct{}{} - mIDs = append(mIDs, m.ID) + deetsID = base.Backup.StreamStoreID + if len(deetsID) == 0 { + deetsID = base.Backup.DetailsID } + } - // TODO(ashmrtn): We should actually just remove this reason from the - // manifests and then if they have no reasons remaining drop them from the - // set. - logger.Ctx(ictx).Infow( - "dropping manifests with duplicate reason", - "manifest_ids", mIDs) + if base.ItemDataSnapshot != nil { + snapID = base.ItemDataSnapshot.ID + snapIncomplete = len(base.ItemDataSnapshot.IncompleteReason) > 0 + } + + ictx := clues.Add( + ctx, + "base_backup_id", backupID, + "base_item_data_snapshot_id", snapID, + "base_details_id", deetsID) + + switch { + case len(backupID) == 0: + logger.Ctx(ictx).Info("dropping base missing backup model") + continue + + case len(snapID) == 0: + logger.Ctx(ictx).Info("dropping base missing item data snapshot") + continue + + case snapIncomplete: + logger.Ctx(ictx).Info("dropping base with incomplete item data snapshot") + continue + + case len(deetsID) == 0: + logger.Ctx(ictx).Info("dropping base missing backup details") + continue + + case len(base.Reasons) == 0: + // Not sure how we'd end up here, but just to make sure we're really + // getting what we expect. + logger.Ctx(ictx).Info("dropping base with no marked Reasons") continue } + + res = append(res, base) } - return toDrop + return res } -func getBackupByID(backups []BackupEntry, bID string) (BackupEntry, bool) { - if len(bID) == 0 { - return BackupEntry{}, false +func fixupReasons( + ctx context.Context, + baseSet []BackupBase, +) []BackupBase { + // Associate a Reason with a set of bases since the basesByReason map needs a + // string key. + type baseEntry struct { + bases []BackupBase + reason identity.Reasoner } - idx := slices.IndexFunc(backups, func(b BackupEntry) bool { - return string(b.ID) == bID - }) + var ( + basesByReason = map[string]baseEntry{} + // res holds a mapping from backup ID -> base. We need this additional level + // of indirection when determining what to return because a base may be + // selected for multiple reasons. 
This map allows us to consolidate that + // into a single base result for all reasons easily. + res = map[model.StableID]BackupBase{} + ) - if idx < 0 || idx >= len(backups) { - return BackupEntry{}, false + // Organize all the base(s) by the Reason(s) they were chosen. A base can + // exist in multiple slices in the map if it was selected for multiple + // Reasons. + for _, base := range baseSet { + for _, reason := range base.Reasons { + foundBases := basesByReason[reasonKey(reason)] + foundBases.reason = reason + foundBases.bases = append(foundBases.bases, base) + + basesByReason[reasonKey(reason)] = foundBases + } } - return backups[idx], true + // Go through the map and check that the length of each slice is 1. If it's + // longer than that then we somehow got multiple bases for the same Reason and + // should drop the extras. + for _, bases := range basesByReason { + ictx := clues.Add( + ctx, + "verify_service", bases.reason.Service().String(), + "verify_category", bases.reason.Category().String()) + + // Not sure how we'd actually get here but handle it anyway. + if len(bases.bases) == 0 { + logger.Ctx(ictx).Info("no bases found for reason") + continue + } + + // We've got at least one base for this Reason. The below finds which base + // to keep based on the creation time of the bases. If there's multiple + // bases in the input slice then we'll log information about the ones that + // we didn't add to the result set. + + // Sort in reverse chronological order so that it's easy to find the + // youngest base. + slices.SortFunc(bases.bases, func(a, b BackupBase) int { + return -a.Backup.CreationTime.Compare(b.Backup.CreationTime) + }) + + keepBase := bases.bases[0] + + // Add the youngest base to the result set. We add each Reason for selecting + // the base individually so that bases dropped for a particular Reason (or + // dropped completely because they overlap for all Reasons) happens without + // additional logic. The dropped (Reason, base) pair will just never be + // added to the result set to begin with. + b, ok := res[keepBase.Backup.ID] + if ok { + // We've already seen this base, just add this Reason to it as well. + b.Reasons = append(b.Reasons, bases.reason) + res[keepBase.Backup.ID] = b + + continue + } + + // We haven't seen this base before. We want to clear all the Reasons for it + // except the one we're currently examining. That allows us to just not add + // bases that are duplicates for a Reason to res and still end up with the + // correct output. + keepBase.Reasons = []identity.Reasoner{bases.reason} + res[keepBase.Backup.ID] = keepBase + + // Don't log about dropped bases if there was only one base. + if len(bases.bases) == 1 { + continue + } + + // This is purely for debugging, but log the base(s) that we dropped for + // this Reason. 
+ var dropped []model.StableID + + for _, b := range bases.bases[1:] { + dropped = append(dropped, b.Backup.ID) + } + + logger.Ctx(ictx).Infow( + "dropping bases for reason", + "dropped_backup_ids", dropped) + } + + return maps.Values(res) } // fixupAndVerify goes through the set of backups and snapshots used for merging // and ensures: // - the reasons for selecting merge snapshots are distinct -// - all bases used for merging have a backup model with item and details -// snapshot ID +// - all bases have a backup model with item and details snapshot IDs +// - all bases have both a backup and item data snapshot present +// - all bases have item data snapshots with no incomplete reason // // Backups that have overlapping reasons or that are not complete are removed // from the set. Dropping these is safe because it only affects how much data we // pull. On the other hand, *not* dropping them is unsafe as it will muck up // merging when we add stuff to kopia (possibly multiple entries for the same // item etc). -// -// TODO(pandeyabs): Refactor common code into a helper as part of #3943. func (bb *backupBases) fixupAndVerify(ctx context.Context) { - toDrop := findNonUniqueManifests(ctx, bb.mergeBases) + // Start off by removing bases that don't meet the minimum requirements of + // having a backup model and item data snapshot or having a backup details ID. + // These requirements apply to both merge and assist bases. + bb.mergeBases = fixupMinRequirements(ctx, bb.mergeBases) + bb.assistBases = fixupMinRequirements(ctx, bb.assistBases) - var ( - backupsToKeep []BackupEntry - assistBackupsToKeep []BackupEntry - mergeToKeep []ManifestEntry - assistToKeep []ManifestEntry - ) - - for _, man := range bb.mergeBases { - if _, ok := toDrop[man.ID]; ok { - continue - } - - bID, _ := man.GetTag(TagBackupID) - - bup, ok := getBackupByID(bb.backups, bID) - if !ok { - toDrop[man.ID] = struct{}{} - - logger.Ctx(ctx).Info( - "dropping merge base due to missing backup", - "manifest_id", man.ID) - - continue - } - - deetsID := bup.StreamStoreID - if len(deetsID) == 0 { - deetsID = bup.DetailsID - } - - if len(bup.SnapshotID) == 0 || len(deetsID) == 0 { - toDrop[man.ID] = struct{}{} - - logger.Ctx(ctx).Info( - "dropping merge base due to invalid backup", - "manifest_id", man.ID) - - continue - } - - backupsToKeep = append(backupsToKeep, bup) - mergeToKeep = append(mergeToKeep, man) - } - - // Drop assist snapshots with overlapping reasons. - toDropAssists := findNonUniqueManifests(ctx, bb.assistBases) - - for _, man := range bb.assistBases { - if _, ok := toDropAssists[man.ID]; ok { - continue - } - - bID, _ := man.GetTag(TagBackupID) - - bup, ok := getBackupByID(bb.assistBackups, bID) - if !ok { - toDrop[man.ID] = struct{}{} - - logger.Ctx(ctx).Info( - "dropping assist base due to missing backup", - "manifest_id", man.ID) - - continue - } - - deetsID := bup.StreamStoreID - if len(deetsID) == 0 { - deetsID = bup.DetailsID - } - - if len(bup.SnapshotID) == 0 || len(deetsID) == 0 { - toDrop[man.ID] = struct{}{} - - logger.Ctx(ctx).Info( - "dropping assist base due to invalid backup", - "manifest_id", man.ID) - - continue - } - - assistBackupsToKeep = append(assistBackupsToKeep, bup) - assistToKeep = append(assistToKeep, man) - } - - bb.backups = backupsToKeep - bb.mergeBases = mergeToKeep - bb.assistBases = assistToKeep - bb.assistBackups = assistBackupsToKeep + // Remove merge bases that have overlapping Reasons. 
It's alright to call this + // on assist bases too because we only expect at most one assist base per + // Reason. + bb.mergeBases = fixupReasons(ctx, bb.mergeBases) + bb.assistBases = fixupReasons(ctx, bb.assistBases) } diff --git a/src/internal/kopia/backup_bases_test.go b/src/internal/kopia/backup_bases_test.go index 71fe9d87d..d793a76e7 100644 --- a/src/internal/kopia/backup_bases_test.go +++ b/src/internal/kopia/backup_bases_test.go @@ -3,12 +3,14 @@ package kopia import ( "fmt" "testing" + "time" + "github.com/alcionai/clues" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/tester" @@ -18,19 +20,69 @@ import ( "github.com/alcionai/corso/src/pkg/path" ) -func makeManifest(id, incmpl, bID string, reasons ...identity.Reasoner) ManifestEntry { +func makeManifest(id, incmpl, bID string) *snapshot.Manifest { bIDKey, _ := makeTagKV(TagBackupID) - return ManifestEntry{ - Manifest: &snapshot.Manifest{ - ID: manifest.ID(id), - IncompleteReason: incmpl, - Tags: map[string]string{bIDKey: bID}, - }, - Reasons: reasons, + return &snapshot.Manifest{ + ID: manifest.ID(id), + IncompleteReason: incmpl, + Tags: map[string]string{bIDKey: bID}, } } +type testInput struct { + tenant string + protectedResource string + id int + cat []path.CategoryType + isAssist bool + incompleteReason string +} + +func makeBase(ti testInput) BackupBase { + baseID := fmt.Sprintf("id%d", ti.id) + reasons := make([]identity.Reasoner, 0, len(ti.cat)) + + for _, c := range ti.cat { + reasons = append( + reasons, + identity.NewReason( + ti.tenant, + ti.protectedResource, + path.ExchangeService, + c)) + } + + return BackupBase{ + Backup: &backup.Backup{ + BaseModel: model.BaseModel{ID: model.StableID("b" + baseID)}, + SnapshotID: baseID, + StreamStoreID: "ss" + baseID, + }, + ItemDataSnapshot: makeManifest(baseID, ti.incompleteReason, "b"+baseID), + Reasons: reasons, + } +} + +// Make a function so tests can modify things without messing with each other. 
+func makeBackupBases( + mergeInputs []testInput, + assistInputs []testInput, +) *backupBases { + res := &backupBases{} + + for _, i := range mergeInputs { + res.mergeBases = append(res.mergeBases, makeBase(i)) + } + + for _, i := range assistInputs { + i.isAssist = true + res.assistBases = append(res.assistBases, makeBase(i)) + } + + return res +} + type BackupBasesUnitSuite struct { tester.Suite } @@ -57,7 +109,7 @@ func (suite *BackupBasesUnitSuite) TestMinBackupVersion() { { name: "Unsorted Backups", bb: &backupBases{ - backups: []BackupEntry{ + mergeBases: []BackupBase{ { Backup: &backup.Backup{ Version: 4, @@ -77,6 +129,29 @@ func (suite *BackupBasesUnitSuite) TestMinBackupVersion() { }, expectedVersion: 0, }, + { + name: "Only Assist Bases", + bb: &backupBases{ + assistBases: []BackupBase{ + { + Backup: &backup.Backup{ + Version: 4, + }, + }, + { + Backup: &backup.Backup{ + Version: 0, + }, + }, + { + Backup: &backup.Backup{ + Version: 2, + }, + }, + }, + }, + expectedVersion: version.NoBackup, + }, } for _, test := range table { suite.Run(test.name, func() { @@ -86,82 +161,79 @@ func (suite *BackupBasesUnitSuite) TestMinBackupVersion() { } func (suite *BackupBasesUnitSuite) TestConvertToAssistBase() { - backups := []BackupEntry{ - {Backup: &backup.Backup{SnapshotID: "1"}}, - {Backup: &backup.Backup{SnapshotID: "2"}}, - {Backup: &backup.Backup{SnapshotID: "3"}}, + bases := []BackupBase{ + { + Backup: &backup.Backup{ + BaseModel: model.BaseModel{ + ID: "1", + }, + SnapshotID: "its1", + }, + ItemDataSnapshot: makeManifest("its1", "", ""), + }, + { + Backup: &backup.Backup{ + BaseModel: model.BaseModel{ + ID: "2", + }, + SnapshotID: "its2", + }, + ItemDataSnapshot: makeManifest("its2", "", ""), + }, + { + Backup: &backup.Backup{ + BaseModel: model.BaseModel{ + ID: "3", + }, + SnapshotID: "its3", + }, + ItemDataSnapshot: makeManifest("its3", "", ""), + }, } - merges := []ManifestEntry{ - makeManifest("1", "", ""), - makeManifest("2", "", ""), - makeManifest("3", "", ""), - } - - delID := manifest.ID("3") + delID := manifest.ID("its3") table := []struct { name string // Below indices specify which items to add from the defined sets above. 
- backup []int merge []int assist []int + expectMerge []int expectAssist []int }{ { name: "Not In Bases", - backup: []int{0, 1}, merge: []int{0, 1}, assist: []int{0, 1}, + expectMerge: []int{0, 1}, expectAssist: []int{0, 1}, }, - { - name: "Different Indexes", - backup: []int{2, 0, 1}, - merge: []int{0, 2, 1}, - assist: []int{0, 1}, - expectAssist: []int{0, 1, 2}, - }, { name: "First Item", - backup: []int{2, 0, 1}, merge: []int{2, 0, 1}, assist: []int{0, 1}, + expectMerge: []int{0, 1}, expectAssist: []int{0, 1, 2}, }, { name: "Middle Item", - backup: []int{0, 2, 1}, merge: []int{0, 2, 1}, assist: []int{0, 1}, + expectMerge: []int{0, 1}, expectAssist: []int{0, 1, 2}, }, { name: "Final Item", - backup: []int{0, 1, 2}, merge: []int{0, 1, 2}, assist: []int{0, 1}, + expectMerge: []int{0, 1}, expectAssist: []int{0, 1, 2}, }, - { - name: "Only In Backups", - backup: []int{0, 1, 2}, - merge: []int{0, 1}, - assist: []int{0, 1}, - expectAssist: []int{0, 1}, - }, - { - name: "Only In Merges", - backup: []int{0, 1}, - merge: []int{0, 1, 2}, - assist: []int{0, 1}, - expectAssist: []int{0, 1}, - }, { name: "Only In Assists Noops", - backup: []int{0, 1}, merge: []int{0, 1}, assist: []int{0, 1, 2}, + expectMerge: []int{0, 1}, expectAssist: []int{0, 1, 2}, }, } @@ -171,27 +243,22 @@ func (suite *BackupBasesUnitSuite) TestConvertToAssistBase() { t := suite.T() bb := &backupBases{} - for _, i := range test.backup { - bb.backups = append(bb.backups, backups[i]) - } - for _, i := range test.merge { - bb.mergeBases = append(bb.mergeBases, merges[i]) + bb.mergeBases = append(bb.mergeBases, bases[i]) } for _, i := range test.assist { - bb.assistBases = append(bb.assistBases, merges[i]) - bb.assistBackups = append(bb.assistBackups, backups[i]) + bb.assistBases = append(bb.assistBases, bases[i]) } - expected := &backupBases{ - backups: []BackupEntry{backups[0], backups[1]}, - mergeBases: []ManifestEntry{merges[0], merges[1]}, + expected := &backupBases{} + + for _, i := range test.expectMerge { + expected.mergeBases = append(expected.mergeBases, bases[i]) } for _, i := range test.expectAssist { - expected.assistBases = append(expected.assistBases, merges[i]) - expected.assistBackups = append(expected.assistBackups, backups[i]) + expected.assistBases = append(expected.assistBases, bases[i]) } bb.ConvertToAssistBase(delID) @@ -203,44 +270,74 @@ func (suite *BackupBasesUnitSuite) TestConvertToAssistBase() { func (suite *BackupBasesUnitSuite) TestDisableMergeBases() { t := suite.T() - merge := []BackupEntry{ - {Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "m1"}}}, - {Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "m2"}}}, + merge := []BackupBase{ + { + Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "m1"}}, + ItemDataSnapshot: &snapshot.Manifest{ID: "ms1"}, + }, + { + Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "m2"}}, + ItemDataSnapshot: &snapshot.Manifest{ID: "ms2"}, + }, } - assist := []BackupEntry{ - {Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "a1"}}}, - {Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "a2"}}}, + + assist := []BackupBase{ + { + Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "a1"}}, + ItemDataSnapshot: &snapshot.Manifest{ID: "as1"}, + }, + { + Backup: &backup.Backup{BaseModel: model.BaseModel{ID: "a2"}}, + ItemDataSnapshot: &snapshot.Manifest{ID: "as2"}, + }, } bb := &backupBases{ - backups: slices.Clone(merge), - mergeBases: make([]ManifestEntry, 2), - assistBackups: slices.Clone(assist), - assistBases: make([]ManifestEntry, 2), + mergeBases: 
merge, + assistBases: assist, } bb.DisableMergeBases() assert.Empty(t, bb.Backups()) assert.Empty(t, bb.MergeBases()) - // Assist base set now has what used to be merge bases. - assert.Len(t, bb.UniqueAssistBases(), 4) - assert.Len(t, bb.SnapshotAssistBases(), 4) - // Merge bases should appear in the set of backup bases used for details - // merging. + // Merge bases should still appear in the assist base set passed in for kopia + // snapshots and details merging. assert.ElementsMatch( t, - append(slices.Clone(merge), assist...), + []ManifestEntry{ + {Manifest: merge[0].ItemDataSnapshot}, + {Manifest: merge[1].ItemDataSnapshot}, + {Manifest: assist[0].ItemDataSnapshot}, + {Manifest: assist[1].ItemDataSnapshot}, + }, + bb.SnapshotAssistBases()) + + assert.ElementsMatch( + t, + []ManifestEntry{ + {Manifest: merge[0].ItemDataSnapshot}, + {Manifest: merge[1].ItemDataSnapshot}, + {Manifest: assist[0].ItemDataSnapshot}, + {Manifest: assist[1].ItemDataSnapshot}, + }, + bb.UniqueAssistBases()) + assert.ElementsMatch( + t, + []BackupEntry{ + {Backup: merge[0].Backup}, + {Backup: merge[1].Backup}, + {Backup: assist[0].Backup}, + {Backup: assist[1].Backup}, + }, bb.UniqueAssistBackups()) } func (suite *BackupBasesUnitSuite) TestDisableAssistBases() { t := suite.T() bb := &backupBases{ - backups: make([]BackupEntry, 2), - mergeBases: make([]ManifestEntry, 2), - assistBases: make([]ManifestEntry, 2), - assistBackups: make([]BackupEntry, 2), + mergeBases: make([]BackupBase, 2), + assistBases: make([]BackupBase, 2), } bb.DisableAssistBases() @@ -254,70 +351,6 @@ func (suite *BackupBasesUnitSuite) TestDisableAssistBases() { } func (suite *BackupBasesUnitSuite) TestMergeBackupBases() { - ro := "resource_owner" - - type testInput struct { - id int - cat []path.CategoryType - } - - // Make a function so tests can modify things without messing with each other. - makeBackupBases := func(mergeInputs []testInput, assistInputs []testInput) *backupBases { - res := &backupBases{} - - for _, i := range mergeInputs { - baseID := fmt.Sprintf("id%d", i.id) - reasons := make([]identity.Reasoner, 0, len(i.cat)) - - for _, c := range i.cat { - reasons = append(reasons, identity.NewReason("", ro, path.ExchangeService, c)) - } - - m := makeManifest(baseID, "", "b"+baseID, reasons...) - - b := BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ID: model.StableID("b" + baseID)}, - SnapshotID: baseID, - StreamStoreID: "ss" + baseID, - }, - Reasons: reasons, - } - - res.backups = append(res.backups, b) - res.mergeBases = append(res.mergeBases, m) - } - - for _, i := range assistInputs { - baseID := fmt.Sprintf("id%d", i.id) - - reasons := make([]identity.Reasoner, 0, len(i.cat)) - - for _, c := range i.cat { - reasons = append(reasons, identity.NewReason("", ro, path.ExchangeService, c)) - } - - m := makeManifest(baseID, "", "a"+baseID, reasons...) 
- - b := BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("a" + baseID), - Tags: map[string]string{model.BackupTypeTag: model.AssistBackup}, - }, - SnapshotID: baseID, - StreamStoreID: "ss" + baseID, - }, - Reasons: reasons, - } - - res.assistBackups = append(res.assistBackups, b) - res.assistBases = append(res.assistBases, m) - } - - return res - } - table := []struct { name string merge []testInput @@ -528,44 +561,25 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() { func (suite *BackupBasesUnitSuite) TestFixupAndVerify() { ro := "resource_owner" - makeMan := func(pct path.CategoryType, id, incmpl, bID string) ManifestEntry { - r := identity.NewReason("", ro, path.ExchangeService, pct) - return makeManifest(id, incmpl, bID, r) - } - // Make a function so tests can modify things without messing with each other. validMail1 := func() *backupBases { - return &backupBases{ - backups: []BackupEntry{ + return makeBackupBases( + []testInput{ { - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid1", - }, - SnapshotID: "id1", - StreamStoreID: "ssid1", - }, + tenant: "t", + protectedResource: ro, + id: 1, + cat: []path.CategoryType{path.EmailCategory}, }, }, - mergeBases: []ManifestEntry{ - makeMan(path.EmailCategory, "id1", "", "bid1"), - }, - assistBackups: []BackupEntry{ + []testInput{ { - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid2", - Tags: map[string]string{model.BackupTypeTag: model.AssistBackup}, - }, - SnapshotID: "id2", - StreamStoreID: "ssid2", - }, + tenant: "t", + protectedResource: ro, + id: 2, + cat: []path.CategoryType{path.EmailCategory}, }, - }, - assistBases: []ManifestEntry{ - makeMan(path.EmailCategory, "id2", "", "bid2"), - }, - } + }) } table := []struct { @@ -574,101 +588,111 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() { expect BackupBases }{ { - name: "empty BaseBackups", + name: "EmptyBaseBackups", bb: &backupBases{}, }, { - name: "Merge Base Without Backup", + name: "MergeBase MissingBackup", bb: func() *backupBases { res := validMail1() - res.backups = nil + res.mergeBases[0].Backup = nil return res }(), expect: func() *backupBases { res := validMail1() res.mergeBases = nil - res.backups = nil return res }(), }, { - name: "Merge Backup Missing Snapshot ID", + name: "MergeBase MissingSnapshot", bb: func() *backupBases { res := validMail1() - res.backups[0].SnapshotID = "" + res.mergeBases[0].ItemDataSnapshot = nil return res }(), expect: func() *backupBases { res := validMail1() res.mergeBases = nil - res.backups = nil return res }(), }, { - name: "Assist backup missing snapshot ID", + name: "AssistBase MissingBackup", bb: func() *backupBases { res := validMail1() - res.assistBackups[0].SnapshotID = "" + res.assistBases[0].Backup = nil return res }(), expect: func() *backupBases { res := validMail1() res.assistBases = nil - res.assistBackups = nil return res }(), }, { - name: "Merge backup missing deets ID", + name: "AssistBase MissingSnapshot", bb: func() *backupBases { res := validMail1() - res.backups[0].StreamStoreID = "" - - return res - }(), - expect: func() *backupBases { - res := validMail1() - res.mergeBases = nil - res.backups = nil - - return res - }(), - }, - { - name: "Assist backup missing deets ID", - bb: func() *backupBases { - res := validMail1() - res.assistBackups[0].StreamStoreID = "" + res.assistBases[0].ItemDataSnapshot = nil return res }(), expect: func() *backupBases { res := validMail1() res.assistBases = nil - res.assistBackups = nil return res 
}(), }, { - name: "Incomplete Snapshot", + name: "MergeBase MissingDeetsID", bb: func() *backupBases { res := validMail1() - res.mergeBases[0].IncompleteReason = "ir" - res.assistBases[0].IncompleteReason = "ir" + res.mergeBases[0].Backup.StreamStoreID = "" + + return res + }(), + expect: func() *backupBases { + res := validMail1() + res.mergeBases = nil return res }(), }, { - name: "Duplicate Reason", + name: "AssistBase MissingDeetsID", + bb: func() *backupBases { + res := validMail1() + res.assistBases[0].Backup.StreamStoreID = "" + + return res + }(), + expect: func() *backupBases { + res := validMail1() + res.assistBases = nil + + return res + }(), + }, + { + name: "MergeAndAssistBase IncompleteSnapshot", + bb: func() *backupBases { + res := validMail1() + res.mergeBases[0].ItemDataSnapshot.IncompleteReason = "ir" + res.assistBases[0].ItemDataSnapshot.IncompleteReason = "ir" + + return res + }(), + }, + { + name: "MergeAndAssistBase DuplicateReasonInBase", bb: func() *backupBases { res := validMail1() res.mergeBases[0].Reasons = append( @@ -678,60 +702,79 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() { res.assistBases[0].Reasons = append( res.assistBases[0].Reasons, res.assistBases[0].Reasons[0]) + + return res + }(), + expect: validMail1(), + }, + { + name: "MergeAndAssistBase MissingReason", + bb: func() *backupBases { + res := validMail1() + res.mergeBases[0].Reasons = nil + res.assistBases[0].Reasons = nil + return res }(), }, { - name: "Single Valid Entry", + name: "SingleValidEntry", bb: validMail1(), expect: validMail1(), }, { - name: "Single Valid Entry With Incomplete Assist With Same Reason", + name: "SingleValidEntry IncompleteAssistWithSameReason", bb: func() *backupBases { res := validMail1() res.assistBases = append( res.assistBases, - makeMan(path.EmailCategory, "id3", "checkpoint", "bid3")) + makeBase(testInput{ + tenant: "t", + protectedResource: "ro", + id: 3, + cat: []path.CategoryType{path.EmailCategory}, + isAssist: true, + incompleteReason: "checkpoint", + })) return res }(), expect: validMail1(), }, { - name: "Single Valid Entry With Backup With Old Deets ID", + name: "SingleValidEntry BackupWithOldDeetsID", bb: func() *backupBases { res := validMail1() - res.backups[0].DetailsID = res.backups[0].StreamStoreID - res.backups[0].StreamStoreID = "" + res.mergeBases[0].Backup.DetailsID = res.mergeBases[0].Backup.StreamStoreID + res.mergeBases[0].Backup.StreamStoreID = "" - res.assistBackups[0].DetailsID = res.assistBackups[0].StreamStoreID - res.assistBackups[0].StreamStoreID = "" + res.assistBases[0].Backup.DetailsID = res.assistBases[0].Backup.StreamStoreID + res.assistBases[0].Backup.StreamStoreID = "" return res }(), expect: func() *backupBases { res := validMail1() - res.backups[0].DetailsID = res.backups[0].StreamStoreID - res.backups[0].StreamStoreID = "" + res.mergeBases[0].Backup.DetailsID = res.mergeBases[0].Backup.StreamStoreID + res.mergeBases[0].Backup.StreamStoreID = "" - res.assistBackups[0].DetailsID = res.assistBackups[0].StreamStoreID - res.assistBackups[0].StreamStoreID = "" + res.assistBases[0].Backup.DetailsID = res.assistBases[0].Backup.StreamStoreID + res.assistBases[0].Backup.StreamStoreID = "" return res }(), }, { - name: "Single Valid Entry With Multiple Reasons", + name: "SingleValidEntry MultipleReasons", bb: func() *backupBases { res := validMail1() res.mergeBases[0].Reasons = append( res.mergeBases[0].Reasons, - identity.NewReason("", ro, path.ExchangeService, path.ContactsCategory)) + identity.NewReason("t", ro, 
path.ExchangeService, path.ContactsCategory)) res.assistBases[0].Reasons = append( res.assistBases[0].Reasons, - identity.NewReason("", ro, path.ExchangeService, path.ContactsCategory)) + identity.NewReason("t", ro, path.ExchangeService, path.ContactsCategory)) return res }(), @@ -739,127 +782,184 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() { res := validMail1() res.mergeBases[0].Reasons = append( res.mergeBases[0].Reasons, - identity.NewReason("", ro, path.ExchangeService, path.ContactsCategory)) + identity.NewReason("t", ro, path.ExchangeService, path.ContactsCategory)) res.assistBases[0].Reasons = append( res.assistBases[0].Reasons, - identity.NewReason("", ro, path.ExchangeService, path.ContactsCategory)) + identity.NewReason("t", ro, path.ExchangeService, path.ContactsCategory)) return res }(), }, { - name: "Two Entries Overlapping Reasons", + name: "TwoEntries OverlappingReasons", bb: func() *backupBases { - res := validMail1() - res.mergeBases = append( - res.mergeBases, - makeMan(path.EmailCategory, "id3", "", "bid3")) + t1, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + require.NoError(suite.T(), err, clues.ToCore(err)) - res.assistBases = append( - res.assistBases, - makeMan(path.EmailCategory, "id4", "", "bid4")) + res := validMail1() + res.mergeBases[0].Backup.CreationTime = t1 + res.assistBases[0].Backup.CreationTime = t1.Add(2 * time.Hour) + + other := makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 3, + cat: []path.CategoryType{path.EmailCategory}, + }) + other.Backup.CreationTime = t1.Add(-time.Minute) + + res.mergeBases = append(res.mergeBases, other) + + other = makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 4, + cat: []path.CategoryType{path.EmailCategory}, + isAssist: true, + }) + other.Backup.CreationTime = t1.Add(time.Hour) + + res.assistBases = append(res.assistBases, other) + + return res + }(), + expect: func() *backupBases { + t1, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + require.NoError(suite.T(), err, clues.ToCore(err)) + + res := validMail1() + res.mergeBases[0].Backup.CreationTime = t1 + res.assistBases[0].Backup.CreationTime = t1.Add(2 * time.Hour) return res }(), }, { - name: "Merge Backup, Three Entries One Invalid", + name: "TwoEntries OverlappingReasons OneInvalid", bb: func() *backupBases { + t1, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + require.NoError(suite.T(), err, clues.ToCore(err)) + + res := validMail1() + res.mergeBases[0].Backup.CreationTime = t1 + res.assistBases[0].Backup.CreationTime = t1.Add(2 * time.Hour) + + other := makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 3, + cat: []path.CategoryType{path.EmailCategory}, + }) + other.Backup.CreationTime = t1.Add(time.Minute) + other.Backup.StreamStoreID = "" + + res.mergeBases = append(res.mergeBases, other) + + other = makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 4, + cat: []path.CategoryType{path.EmailCategory}, + isAssist: true, + }) + other.Backup.CreationTime = t1.Add(3 * time.Hour) + other.Backup.StreamStoreID = "" + + res.assistBases = append(res.assistBases, other) + + return res + }(), + expect: func() *backupBases { + t1, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + require.NoError(suite.T(), err, clues.ToCore(err)) + + res := validMail1() + res.mergeBases[0].Backup.CreationTime = t1 + res.assistBases[0].Backup.CreationTime = t1.Add(2 * time.Hour) + + return res + }(), + }, + { + name: "MergeBase ThreeEntriesOneInvalid", + bb: func() 
*backupBases { + invalid := makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 3, + cat: []path.CategoryType{path.ContactsCategory}, + incompleteReason: "checkpoint", + }) + invalid.Backup.StreamStoreID = "" + invalid.Backup.SnapshotID = "" + res := validMail1() - res.backups = append( - res.backups, - BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid3", - }, - }, - }, - BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid4", - }, - SnapshotID: "id4", - StreamStoreID: "ssid4", - }, - }) res.mergeBases = append( res.mergeBases, - makeMan(path.ContactsCategory, "id3", "checkpoint", "bid3"), - makeMan(path.EventsCategory, "id4", "", "bid4")) + invalid, + makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 4, + cat: []path.CategoryType{path.EventsCategory}, + })) return res }(), expect: func() *backupBases { res := validMail1() - res.backups = append( - res.backups, - BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid4", - }, - SnapshotID: "id4", - StreamStoreID: "ssid4", - }, - }) res.mergeBases = append( res.mergeBases, - makeMan(path.EventsCategory, "id4", "", "bid4")) + makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 4, + cat: []path.CategoryType{path.EventsCategory}, + })) return res }(), }, { - name: "Assist Backup, Three Entries One Invalid", + name: "AssistBase ThreeEntriesOneInvalid", bb: func() *backupBases { + invalid := makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 3, + cat: []path.CategoryType{path.ContactsCategory}, + isAssist: true, + incompleteReason: "checkpoint", + }) + invalid.Backup.StreamStoreID = "" + invalid.Backup.SnapshotID = "" + res := validMail1() - res.assistBackups = append( - res.assistBackups, - BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid3", - Tags: map[string]string{model.BackupTypeTag: model.AssistBackup}, - }, - }, - }, - BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid4", - Tags: map[string]string{model.BackupTypeTag: model.AssistBackup}, - }, - SnapshotID: "id4", - StreamStoreID: "ssid4", - }, - }) res.assistBases = append( res.assistBases, - makeMan(path.ContactsCategory, "id3", "checkpoint", "bid3"), - makeMan(path.EventsCategory, "id4", "", "bid4")) + invalid, + makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 4, + cat: []path.CategoryType{path.EventsCategory}, + isAssist: true, + })) return res }(), expect: func() *backupBases { res := validMail1() - res.assistBackups = append( - res.assistBackups, - BackupEntry{ - Backup: &backup.Backup{ - BaseModel: model.BaseModel{ - ID: "bid4", - Tags: map[string]string{model.BackupTypeTag: model.AssistBackup}, - }, - SnapshotID: "id4", - StreamStoreID: "ssid4", - }, - }) res.assistBases = append( res.assistBases, - makeMan(path.EventsCategory, "id4", "", "bid4")) + makeBase(testInput{ + tenant: "t", + protectedResource: ro, + id: 4, + cat: []path.CategoryType{path.EventsCategory}, + isAssist: true, + })) return res }(), diff --git a/src/internal/kopia/base_finder.go b/src/internal/kopia/base_finder.go index d6fd20a7e..1cf07437f 100644 --- a/src/internal/kopia/base_finder.go +++ b/src/internal/kopia/base_finder.go @@ -136,7 +136,7 @@ func (b *baseFinder) getBackupModel( return bup, nil } -type backupBase struct { +type BackupBase struct { Backup *backup.Backup ItemDataSnapshot *snapshot.Manifest // Reasons contains the tenant, protected resource and service/categories that @@ -157,7 
+157,7 @@ func (b *baseFinder) findBasesInSet( ctx context.Context, reason identity.Reasoner, metas []*manifest.EntryMetadata, -) (*backupBase, *backupBase, error) { +) (*BackupBase, *BackupBase, error) { // Sort manifests by time so we can go through them sequentially. The code in // kopia appears to sort them already, but add sorting here just so we're not // reliant on undocumented behavior. @@ -166,8 +166,8 @@ func (b *baseFinder) findBasesInSet( }) var ( - mergeBase *backupBase - assistBase *backupBase + mergeBase *BackupBase + assistBase *BackupBase ) for i := len(metas) - 1; i >= 0; i-- { @@ -226,7 +226,7 @@ func (b *baseFinder) findBasesInSet( if b.isAssistBackupModel(ictx, bup) { if assistBase == nil { - assistBase = &backupBase{ + assistBase = &BackupBase{ Backup: bup, ItemDataSnapshot: man, Reasons: []identity.Reasoner{reason}, @@ -248,7 +248,7 @@ func (b *baseFinder) findBasesInSet( "search_snapshot_id", meta.ID, "ssid", ssid) - mergeBase = &backupBase{ + mergeBase = &BackupBase{ Backup: bup, ItemDataSnapshot: man, Reasons: []identity.Reasoner{reason}, @@ -304,7 +304,7 @@ func (b *baseFinder) getBase( ctx context.Context, r identity.Reasoner, tags map[string]string, -) (*backupBase, *backupBase, error) { +) (*BackupBase, *BackupBase, error) { allTags := map[string]string{} for _, k := range tagKeys(r) { @@ -336,8 +336,8 @@ func (b *baseFinder) FindBases( // Backup models and item data snapshot manifests are 1:1 for bases so just // track things by the backup ID. We need to track by ID so we can coalesce // the reason for selecting something. - mergeBases = map[model.StableID]backupBase{} - assistBases = map[model.StableID]backupBase{} + mergeBases = map[model.StableID]BackupBase{} + assistBases = map[model.StableID]BackupBase{} ) for _, searchReason := range reasons { @@ -379,45 +379,11 @@ func (b *baseFinder) FindBases( } } - // Convert what we got to the format that backupBases takes right now. - // TODO(ashmrtn): Remove when backupBases has consolidated fields. - res := &backupBases{} - bups := make([]BackupEntry, 0, len(mergeBases)) - snaps := make([]ManifestEntry, 0, len(mergeBases)) - - for _, base := range mergeBases { - bups = append(bups, BackupEntry{ - Backup: base.Backup, - Reasons: base.Reasons, - }) - - snaps = append(snaps, ManifestEntry{ - Manifest: base.ItemDataSnapshot, - Reasons: base.Reasons, - }) + res := &backupBases{ + mergeBases: maps.Values(mergeBases), + assistBases: maps.Values(assistBases), } - res.backups = bups - res.mergeBases = snaps - - bups = make([]BackupEntry, 0, len(assistBases)) - snaps = make([]ManifestEntry, 0, len(assistBases)) - - for _, base := range assistBases { - bups = append(bups, BackupEntry{ - Backup: base.Backup, - Reasons: base.Reasons, - }) - - snaps = append(snaps, ManifestEntry{ - Manifest: base.ItemDataSnapshot, - Reasons: base.Reasons, - }) - } - - res.assistBackups = bups - res.assistBases = snaps - res.fixupAndVerify(ctx) return res diff --git a/src/internal/kopia/mock_backup_base.go b/src/internal/kopia/mock_backup_base.go index cca5580a0..90a31ecf4 100644 --- a/src/internal/kopia/mock_backup_base.go +++ b/src/internal/kopia/mock_backup_base.go @@ -3,9 +3,88 @@ package kopia import ( "testing" + "github.com/kopia/kopia/repo/manifest" + "github.com/kopia/kopia/snapshot" "github.com/stretchr/testify/assert" + + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/pkg/backup" ) +// TODO(ashmrtn): Temp function until all PRs in the series merge. 
+func backupsMatch(t *testing.T, expect, got []BackupEntry, dataType string) { + expectBups := make([]*backup.Backup, 0, len(expect)) + gotBups := make([]*backup.Backup, 0, len(got)) + gotBasesByID := map[model.StableID]BackupEntry{} + + for _, e := range expect { + if e.Backup != nil { + expectBups = append(expectBups, e.Backup) + } + } + + for _, g := range got { + if g.Backup != nil { + gotBups = append(gotBups, g.Backup) + gotBasesByID[g.Backup.ID] = g + } + } + + assert.ElementsMatch(t, expectBups, gotBups, dataType+" backup model") + + // Need to compare Reasons separately since they're also a slice. + for _, e := range expect { + if e.Backup == nil { + continue + } + + b, ok := gotBasesByID[e.Backup.ID] + if !ok { + // Missing bases will be reported above. + continue + } + + assert.ElementsMatch(t, e.Reasons, b.Reasons) + } +} + +// TODO(ashmrtn): Temp function until all PRs in the series merge. +func manifestsMatch(t *testing.T, expect, got []ManifestEntry, dataType string) { + expectMans := make([]*snapshot.Manifest, 0, len(expect)) + gotMans := make([]*snapshot.Manifest, 0, len(got)) + gotBasesByID := map[manifest.ID]ManifestEntry{} + + for _, e := range expect { + if e.Manifest != nil { + expectMans = append(expectMans, e.Manifest) + } + } + + for _, g := range got { + if g.Manifest != nil { + gotMans = append(gotMans, g.Manifest) + gotBasesByID[g.Manifest.ID] = g + } + } + + assert.ElementsMatch(t, expectMans, gotMans, dataType+" item data snapshot") + + // Need to compare Reasons separately since they're also a slice. + for _, e := range expect { + if e.Manifest == nil { + continue + } + + b, ok := gotBasesByID[e.Manifest.ID] + if !ok { + // Missing bases will be reported above. + continue + } + + assert.ElementsMatch(t, e.Reasons, b.Reasons) + } +} + func AssertBackupBasesEqual(t *testing.T, expect, got BackupBases) { if expect == nil && got == nil { return @@ -33,11 +112,11 @@ func AssertBackupBasesEqual(t *testing.T, expect, got BackupBases) { return } - assert.ElementsMatch(t, expect.Backups(), got.Backups(), "backups") - assert.ElementsMatch(t, expect.MergeBases(), got.MergeBases(), "merge bases") - assert.ElementsMatch(t, expect.UniqueAssistBackups(), got.UniqueAssistBackups(), "assist backups") - assert.ElementsMatch(t, expect.UniqueAssistBases(), got.UniqueAssistBases(), "assist bases") - assert.ElementsMatch(t, expect.SnapshotAssistBases(), got.SnapshotAssistBases(), "snapshot assist bases") + backupsMatch(t, expect.Backups(), got.Backups(), "merge backups") + manifestsMatch(t, expect.MergeBases(), got.MergeBases(), "merge manifests") + backupsMatch(t, expect.UniqueAssistBackups(), got.UniqueAssistBackups(), "assist backups") + manifestsMatch(t, expect.UniqueAssistBases(), got.UniqueAssistBases(), "assist manifests") + manifestsMatch(t, expect.SnapshotAssistBases(), got.SnapshotAssistBases(), "snapshot assist bases") } func NewMockBackupBases() *MockBackupBases { @@ -49,22 +128,63 @@ type MockBackupBases struct { } func (bb *MockBackupBases) WithBackups(b ...BackupEntry) *MockBackupBases { - bb.backupBases.backups = append(bb.Backups(), b...) + bases := make([]BackupBase, 0, len(b)) + for _, base := range b { + bases = append(bases, BackupBase{ + Backup: base.Backup, + Reasons: base.Reasons, + }) + } + + bb.backupBases.mergeBases = append(bb.NewMergeBases(), bases...) + return bb } func (bb *MockBackupBases) WithMergeBases(m ...ManifestEntry) *MockBackupBases { - bb.backupBases.mergeBases = append(bb.MergeBases(), m...) 
+ bases := make([]BackupBase, 0, len(m)) + for _, base := range m { + bases = append(bases, BackupBase{ + ItemDataSnapshot: base.Manifest, + Reasons: base.Reasons, + }) + } + + bb.backupBases.mergeBases = append(bb.NewMergeBases(), bases...) + return bb } func (bb *MockBackupBases) WithAssistBackups(b ...BackupEntry) *MockBackupBases { - bb.backupBases.assistBackups = append(bb.UniqueAssistBackups(), b...) + bases := make([]BackupBase, 0, len(b)) + for _, base := range b { + bases = append(bases, BackupBase{ + Backup: base.Backup, + Reasons: base.Reasons, + }) + } + + bb.backupBases.assistBases = append(bb.NewUniqueAssistBases(), bases...) + return bb } func (bb *MockBackupBases) WithAssistBases(m ...ManifestEntry) *MockBackupBases { - bb.backupBases.assistBases = append(bb.UniqueAssistBases(), m...) + bases := make([]BackupBase, 0, len(m)) + for _, base := range m { + bases = append(bases, BackupBase{ + ItemDataSnapshot: base.Manifest, + Reasons: base.Reasons, + }) + } + + bb.backupBases.assistBases = append(bb.NewUniqueAssistBases(), bases...) + + return bb +} + +func (bb *MockBackupBases) NewWithMergeBases(b ...BackupBase) *MockBackupBases { + bb.backupBases.mergeBases = append(bb.NewMergeBases(), b...) return bb } diff --git a/src/internal/operations/manifests_test.go b/src/internal/operations/manifests_test.go index 1f3c9df48..82669bbf8 100644 --- a/src/internal/operations/manifests_test.go +++ b/src/internal/operations/manifests_test.go @@ -639,8 +639,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb } } - makeBackup := func(ro, snapID string, cats ...path.CategoryType) kopia.BackupEntry { - return kopia.BackupEntry{ + makeBackupBase := func(ro, snapID, incmpl string, cats ...path.CategoryType) kopia.BackupBase { + return kopia.BackupBase{ Backup: &backup.Backup{ BaseModel: model.BaseModel{ ID: model.StableID(snapID + "bup"), @@ -648,6 +648,11 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb SnapshotID: snapID, StreamStoreID: snapID + "store", }, + ItemDataSnapshot: &snapshot.Manifest{ + ID: manifest.ID(snapID), + IncompleteReason: incmpl, + Tags: map[string]string{"tag:" + kopia.TagBackupID: snapID + "bup"}, + }, Reasons: buildReasons(tid, ro, path.ExchangeService, cats...), } } @@ -682,8 +687,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb bf: &mockBackupFinder{ data: map[string]kopia.BackupBases{ fbro: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)), }, }, rp: mockRestoreProducer{}, @@ -693,17 +697,15 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb assertB: assert.False, expectDCS: nil, expectMans: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)). + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)). MockDisableMergeBases(), }, { name: "only fallbacks", bf: &mockBackupFinder{ data: map[string]kopia.BackupBases{ - fbro: kopia.NewMockBackupBases().WithMergeBases( - makeMan(fbro, "fb_id1", "", path.EmailCategory)).WithBackups( - makeBackup(fbro, "fb_id1", path.EmailCategory)), + fbro: kopia.NewMockBackupBases(). 
+ NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)), }, }, rp: mockRestoreProducer{ @@ -717,16 +719,14 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb assertB: assert.True, expectDCS: []mockColl{{id: "fb_id1"}}, expectMans: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)), }, { name: "only fallbacks, drop assist", bf: &mockBackupFinder{ data: map[string]kopia.BackupBases{ fbro: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)), }, }, rp: mockRestoreProducer{ @@ -741,8 +741,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb assertB: assert.True, expectDCS: []mockColl{{id: "fb_id1"}}, expectMans: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)). + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)). MockDisableAssistBases(), }, { @@ -752,8 +751,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb ro: kopia.NewMockBackupBases(). WithMergeBases(makeMan(ro, "id1", "", path.EmailCategory)), fbro: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)), }, }, rp: mockRestoreProducer{ @@ -804,8 +802,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb WithMergeBases(makeMan(ro, "id1", "", path.EmailCategory)). WithAssistBases(makeMan(ro, "id2", "checkpoint", path.EmailCategory)), fbro: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)). + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)). WithAssistBases(makeMan(fbro, "fb_id2", "checkpoint", path.EmailCategory)), }, }, @@ -834,8 +831,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb ro: kopia.NewMockBackupBases().WithAssistBases( makeMan(ro, "id2", "checkpoint", path.EmailCategory)), fbro: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)), }, }, rp: mockRestoreProducer{ @@ -851,8 +847,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb assertB: assert.True, expectDCS: []mockColl{{id: "fb_id1"}}, expectMans: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)). + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)). WithAssistBases(makeMan(ro, "id2", "checkpoint", path.EmailCategory)), }, { @@ -862,8 +857,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb ro: kopia.NewMockBackupBases().WithAssistBases( makeMan(ro, "id2", "checkpoint", path.EmailCategory)), fbro: kopia.NewMockBackupBases(). 
- WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)), }, }, rp: mockRestoreProducer{ @@ -880,8 +874,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb assertB: assert.True, expectDCS: []mockColl{{id: "fb_id1"}}, expectMans: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory)). + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory)). MockDisableAssistBases(), }, { @@ -916,8 +909,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb ro: kopia.NewMockBackupBases().WithMergeBases( makeMan(ro, "id1", "", path.EmailCategory, path.ContactsCategory)), fbro: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.EmailCategory, path.ContactsCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.EmailCategory, path.ContactsCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory, path.ContactsCategory)), }, }, rp: mockRestoreProducer{ @@ -948,8 +940,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb ro: kopia.NewMockBackupBases().WithMergeBases( makeMan(ro, "id1", "", path.EmailCategory)), fbro: kopia.NewMockBackupBases(). - WithMergeBases(makeMan(fbro, "fb_id1", "", path.ContactsCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.ContactsCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.ContactsCategory)), }, }, rp: mockRestoreProducer{ @@ -967,10 +958,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb assertB: assert.True, expectDCS: []mockColl{{id: "id1"}, {id: "fb_id1"}}, expectMans: kopia.NewMockBackupBases(). - WithMergeBases( - makeMan(ro, "id1", "", path.EmailCategory), - makeMan(fbro, "fb_id1", "", path.ContactsCategory)). - WithBackups(makeBackup(fbro, "fb_id1", path.ContactsCategory)), + WithMergeBases(makeMan(ro, "id1", "", path.EmailCategory)). + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.ContactsCategory)), }, { name: "complete mans and complete fallbacks, fallback has superset of reasons", @@ -979,10 +968,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb ro: kopia.NewMockBackupBases().WithMergeBases( makeMan(ro, "id1", "", path.EmailCategory)), fbro: kopia.NewMockBackupBases(). - WithMergeBases( - makeMan(fbro, "fb_id1", "", path.EmailCategory, path.ContactsCategory)). - WithBackups( - makeBackup(fbro, "fb_id1", path.EmailCategory, path.ContactsCategory)), + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.EmailCategory, path.ContactsCategory)), }, }, rp: mockRestoreProducer{ @@ -1004,11 +990,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_Fallb assertB: assert.True, expectDCS: []mockColl{{id: "id1"}, {id: "fb_id1"}}, expectMans: kopia.NewMockBackupBases(). - WithMergeBases( - makeMan(ro, "id1", "", path.EmailCategory), - makeMan(fbro, "fb_id1", "", path.ContactsCategory)). - WithBackups( - makeBackup(fbro, "fb_id1", path.ContactsCategory)), + WithMergeBases(makeMan(ro, "id1", "", path.EmailCategory)). + NewWithMergeBases(makeBackupBase(fbro, "fb_id1", "", path.ContactsCategory)), }, }