From 24b9831ce69b990b811039691e3d85d3e1d8d904 Mon Sep 17 00:00:00 2001
From: ashmrtn <3891298+ashmrtn@users.noreply.github.com>
Date: Fri, 25 Aug 2023 12:05:25 -0700
Subject: [PATCH] Cleanup old assist bases (#4081)

Add helper functions and tests that find old assist bases and add them to the
set of backups to garbage collect. This allows us to remove backups that
aren't displayed to the user because they had errors during creation.

The more recent assist backups can still be used for incremental backups, so
we don't want to garbage collect all of them.

This still does not wire the new code into any existing corso function; it
only adds the cleanup logic. For reviewers, two standalone sketches of the
selection and tag-normalization logic are included after the diff.

---

#### Does this PR need a docs update or release note?

- [ ] :white_check_mark: Yes, it's included
- [x] :clock1: Yes, but in a later PR
- [ ] :no_entry: No

#### Type of change

- [x] :sunflower: Feature
- [ ] :bug: Bugfix
- [ ] :world_map: Documentation
- [ ] :robot: Supportability/Tests
- [ ] :computer: CI/Deployment
- [ ] :broom: Tech Debt/Cleanup

#### Issue(s)

* #3217

#### Test Plan

- [ ] :muscle: Manual
- [x] :zap: Unit test
- [ ] :green_heart: E2E
---
 src/internal/kopia/cleanup_backups.go      | 217 +++++++++++++-
 src/internal/kopia/cleanup_backups_test.go | 322 +++++++++++++++++++++
 2 files changed, 530 insertions(+), 9 deletions(-)

diff --git a/src/internal/kopia/cleanup_backups.go b/src/internal/kopia/cleanup_backups.go
index 82ae04dc4..0e789a217 100644
--- a/src/internal/kopia/cleanup_backups.go
+++ b/src/internal/kopia/cleanup_backups.go
@@ -3,12 +3,14 @@ package kopia
 import (
     "context"
     "errors"
+    "strings"
     "time"
 
     "github.com/alcionai/clues"
     "github.com/kopia/kopia/repo/manifest"
     "github.com/kopia/kopia/snapshot"
     "golang.org/x/exp/maps"
+    "golang.org/x/exp/slices"
 
     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/model"
@@ -17,6 +19,12 @@ import (
     "github.com/alcionai/corso/src/pkg/store"
 )
 
+const (
+    serviceCatTagPrefix = "sc-"
+    kopiaPathLabel      = "path"
+    tenantTag           = "tenant"
+)
+
 // cleanupOrphanedData uses bs and mf to lookup all models/snapshots for backups
 // and deletes items that are older than nowFunc() - gcBuffer (cutoff) that are
 // not "complete" backups with:
@@ -67,8 +75,11 @@ func cleanupOrphanedData(
         // 1. check if there's a corresponding backup for them
         // 2. delete the details if they're orphaned
         deets = map[manifest.ID]struct{}{}
-        // dataSnaps is a hash set of the snapshot IDs for item data snapshots.
-        dataSnaps = map[manifest.ID]struct{}{}
+        // dataSnaps maps item data snapshot IDs to their manifest entries.
+        dataSnaps = map[manifest.ID]*manifest.EntryMetadata{}
+        // toDelete is the set of objects to delete from kopia. It starts out with
+        // all items and has ineligible items removed from it.
+        toDelete = map[manifest.ID]struct{}{}
     )
 
     cutoff := nowFunc().Add(-gcBuffer)
@@ -81,9 +92,11 @@ func cleanupOrphanedData(
             continue
         }
 
+        toDelete[snap.ID] = struct{}{}
+
         k, _ := makeTagKV(TagBackupCategory)
         if _, ok := snap.Labels[k]; ok {
-            dataSnaps[snap.ID] = struct{}{}
+            dataSnaps[snap.ID] = snap
             continue
         }
 
@@ -106,6 +119,7 @@ func cleanupOrphanedData(
         }
 
         deets[d.ModelStoreID] = struct{}{}
+        toDelete[d.ModelStoreID] = struct{}{}
     }
 
     // Get all backup models.
@@ -114,8 +128,11 @@ func cleanupOrphanedData(
         return clues.Wrap(err, "getting all backup models")
     }
 
-    toDelete := maps.Clone(deets)
-    maps.Copy(toDelete, dataSnaps)
+    // assistBackups is the set of backups that have:
+    //   * a label denoting they're an assist backup
+    //   * an item data snapshot
+    //   * a details snapshot
+    var assistBackups []*backup.Backup
 
     for _, bup := range bups {
         // Don't even try to see if this needs garbage collected because it's not
@@ -162,7 +179,7 @@ func cleanupOrphanedData(
             ssid = bm.DetailsID
         }
 
-        _, dataOK := dataSnaps[manifest.ID(bm.SnapshotID)]
+        d, dataOK := dataSnaps[manifest.ID(bm.SnapshotID)]
         _, deetsOK := deets[manifest.ID(ssid)]
 
         // All data is present, we shouldn't garbage collect this backup.
@@ -170,6 +187,33 @@ func cleanupOrphanedData(
             delete(toDelete, bup.ModelStoreID)
             delete(toDelete, manifest.ID(bm.SnapshotID))
             delete(toDelete, manifest.ID(ssid))
+
+            // Add to the assist backup set so that we can attempt to garbage
+            // collect older assist backups below.
+            if bup.Tags[model.BackupTypeTag] == model.AssistBackup {
+                // This is a little messy, but it simplifies the logic below. The
+                // state of tagging in corso isn't all that great right now and we'd
+                // really like to consolidate tags and clean them up. For now, we're
+                // going to copy the tags related to a backup's Reasons from the
+                // item data snapshot to the backup model. This makes the function
+                // that checks whether assist backups should be garbage collected a
+                // bit easier because it only has to source data from backup models.
+                if err := transferTags(d, &bm); err != nil {
+                    logger.Ctx(ctx).Debugw(
+                        "transferring legacy tags to backup model",
+                        "err", err,
+                        "snapshot_id", d.ID,
+                        "backup_id", bup.ID)
+
+                    // Continuing here means the base won't be eligible for old assist
+                    // base garbage collection. We could add more logic to eventually
+                    // delete the base in question, but I don't really expect to see
+                    // failures when transferring tags.
+                    continue
+                }
+
+                assistBackups = append(assistBackups, &bm)
+            }
         }
     }
 
@@ -178,14 +222,169 @@ func cleanupOrphanedData(
         "num_items", len(toDelete),
         "kopia_ids", maps.Keys(toDelete))
 
+    // This will technically keep a superset of the assist bases we need. The
+    // reason is that we only add something to the set of assist bases after
+    // we've excluded backups in the buffer window. For example, we could
+    // decide that some base is the youngest of the assist bases we can see
+    // and exclude it from garbage collection. However, when looking at the
+    // set of all assist bases, including those in the buffer window, it's
+    // possible the one we thought was the youngest actually isn't and could
+    // be garbage collected.
+    //
+    // This sort of edge case should only affect a few assist bases at a time.
+    // Assuming this function is run somewhat periodically, missing these edge
+    // cases is alright because they'll get picked up on a subsequent run.
+    assistItems := collectOldAssistBases(ctx, assistBackups)
+
+    logger.Ctx(ctx).Debugw(
+        "garbage collecting old assist bases",
+        "assist_num_items", len(assistItems),
+        "assist_kopia_ids", assistItems)
+
+    assistItems = append(assistItems, maps.Keys(toDelete)...)
+
     // Use single atomic batch delete operation to cleanup to keep from making a
     // bunch of manifest content blobs.
-    if err := bs.DeleteWithModelStoreIDs(ctx, maps.Keys(toDelete)...); err != nil {
+    if err := bs.DeleteWithModelStoreIDs(ctx, assistItems...); err != nil {
         return clues.Wrap(err, "deleting orphaned data")
     }
 
-    // TODO(ashmrtn): Do some pruning of assist backup models so we don't keep
-    // them around forever.
+    return nil
+}
+
+var skipKeys = []string{
+    TagBackupID,
+    TagBackupCategory,
+}
+
+func transferTags(snap *manifest.EntryMetadata, bup *backup.Backup) error {
+    tenant, err := decodeElement(snap.Labels[kopiaPathLabel])
+    if err != nil {
+        return clues.Wrap(err, "decoding tenant from label")
+    }
+
+    bup.Tags[tenantTag] = tenant
+
+    skipTags := map[string]struct{}{}
+
+    for _, k := range skipKeys {
+        key, _ := makeTagKV(k)
+        skipTags[key] = struct{}{}
+    }
+
+    // Safe to check only this field because the old field was deprecated prior
+    // to the tagging of assist backups and this function only deals with
+    // assist backups.
+    roid := bup.ProtectedResourceID
+
+    roidK, _ := makeTagKV(roid)
+    skipTags[roidK] = struct{}{}
+
+    // This is hacky, but right now we don't have a good way to get only the
+    // Reason tags for something. We can, however, find them by searching for
+    // all the "normalized" tags and then discarding the ones we know aren't
+    // Reasons. Unfortunately this won't work if custom tags we don't know
+    // about are added to the backup.
+    //
+    // Convert them to the newer format that we'd like to have, where the
+    // service/category tags carry the "sc-" prefix.
+    for tag := range snap.Labels {
+        if _, ok := skipTags[tag]; ok || !strings.HasPrefix(tag, userTagPrefix) {
+            continue
+        }
+
+        bup.Tags[strings.Replace(tag, userTagPrefix, serviceCatTagPrefix, 1)] = "0"
+    }
 
     return nil
 }
+
+func collectOldAssistBases(
+    ctx context.Context,
+    bups []*backup.Backup,
+) []manifest.ID {
+    // maybeDelete is the set of backups that could be deleted. It starts out
+    // as the set of all backups and has ineligible backups removed from it.
+    maybeDelete := map[manifest.ID]*backup.Backup{}
+    // Figure out which backups have overlapping Reasons. A single backup can
+    // appear in multiple slices in the map, one for each Reason associated
+    // with it.
+    bupsByReason := map[string][]*backup.Backup{}
+
+    for _, bup := range bups {
+        // Safe to pull from this field since assist backups came after we
+        // switched to using ProtectedResourceID.
+        roid := bup.ProtectedResourceID
+
+        tenant := bup.Tags[tenantTag]
+        if len(tenant) == 0 {
+            // We can skip this backup. It won't get garbage collected, but it
+            // also won't result in incorrect behavior overall.
+            logger.Ctx(ctx).Infow("missing tenant tag in backup", "backup_id", bup.ID)
+            continue
+        }
+
+        maybeDelete[manifest.ID(bup.ModelStoreID)] = bup
+
+        for tag := range bup.Tags {
+            if strings.HasPrefix(tag, serviceCatTagPrefix) {
+                // The precise way we concatenate this info doesn't really matter
+                // as long as it's consistent for all backups in the set and
+                // includes all the pieces (tenant, resource, service/category)
+                // we need for uniqueness.
+                fullTag := tenant + roid + tag
+                bupsByReason[fullTag] = append(bupsByReason[fullTag], bup)
+            }
+        }
+    }
+
+    // For each set of backups we found, sort them by time. Mark all but the
+    // youngest backup in each group as eligible for garbage collection.
+    //
+    // We implement this process as removing backups from the set of potential
+    // backups to delete because it's possible for a backup to not be the
+    // youngest for one Reason but be the youngest for a different Reason (e.g.
+    // the most recent Exchange mail backup but not the most recent Exchange
+    // contacts backup). A simple delete operation on the map is sufficient to
+    // remove a backup even if it's the youngest for only a single Reason.
+    // Otherwise we'd need to do another pass after this to determine the
+    // isYoungest status for all Reasons in the backup.
+    //
+    // TODO(ashmrtn): Handle concurrent backups somehow? Right now backups that
+    // have overlapping start and end times aren't explicitly handled.
+    for _, bupSet := range bupsByReason {
+        if len(bupSet) == 0 {
+            continue
+        }
+
+        // Sort in reverse chronological order so that we can just remove the
+        // zeroth item from the delete set instead of getting the slice length.
+        // Unfortunately this could also put us in the pathological case where
+        // almost all items need to be swapped, since in theory kopia returns
+        // results in chronological order and we're processing them in the
+        // order kopia returns them.
+        slices.SortStableFunc(bupSet, func(a, b *backup.Backup) int {
+            return -a.CreationTime.Compare(b.CreationTime)
+        })
+
+        delete(maybeDelete, manifest.ID(bupSet[0].ModelStoreID))
+    }
+
+    res := make([]manifest.ID, 0, 3*len(maybeDelete))
+
+    // For all items remaining in the delete set, generate the final set of
+    // items to delete. This set includes the data snapshot ID, details
+    // snapshot ID, and backup model ID for each backup.
+    for bupID, bup := range maybeDelete {
+        // Don't need to check whether we use StreamStoreID or DetailsID because
+        // DetailsID was deprecated prior to tagging backups as assist backups.
+        // Since the input set contains only assist backups, there's no overlap
+        // between the two implementations.
+        res = append(
+            res,
+            bupID,
+            manifest.ID(bup.SnapshotID),
+            manifest.ID(bup.StreamStoreID))
+    }
+
+    return res
+}
diff --git a/src/internal/kopia/cleanup_backups_test.go b/src/internal/kopia/cleanup_backups_test.go
index ecd36848d..89c7d9f20 100644
--- a/src/internal/kopia/cleanup_backups_test.go
+++ b/src/internal/kopia/cleanup_backups_test.go
@@ -15,6 +15,8 @@ import (
     "github.com/alcionai/corso/src/internal/model"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/backup"
+    "github.com/alcionai/corso/src/pkg/backup/identity"
+    "github.com/alcionai/corso/src/pkg/path"
 )
 
 type BackupCleanupUnitSuite struct {
@@ -163,6 +165,58 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
         }
     }
 
+    bupCurrent2 := func() *backup.Backup {
+        return &backup.Backup{
+            BaseModel: model.BaseModel{
+                ID:           model.StableID("current-bup-id-2"),
+                ModelStoreID: manifest.ID("current-bup-msid-2"),
+            },
+            SnapshotID:    "current-snap-msid-2",
+            StreamStoreID: "current-deets-msid-2",
+        }
+    }
+
+    snapCurrent2 := func() *manifest.EntryMetadata {
+        return &manifest.EntryMetadata{
+            ID: "current-snap-msid-2",
+            Labels: map[string]string{
+                backupTag: "0",
+            },
+        }
+    }
+
+    deetsCurrent2 := func() *manifest.EntryMetadata {
+        return &manifest.EntryMetadata{
+            ID: "current-deets-msid-2",
+        }
+    }
+
+    bupCurrent3 := func() *backup.Backup {
+        return &backup.Backup{
+            BaseModel: model.BaseModel{
+                ID:           model.StableID("current-bup-id-3"),
+                ModelStoreID: manifest.ID("current-bup-msid-3"),
+            },
+            SnapshotID:    "current-snap-msid-3",
+            StreamStoreID: "current-deets-msid-3",
+        }
+    }
+
+    snapCurrent3 := func() *manifest.EntryMetadata {
+        return &manifest.EntryMetadata{
+            ID: "current-snap-msid-3",
+            Labels: map[string]string{
+                backupTag: "0",
+            },
+        }
+    }
+
+    deetsCurrent3 := func() *manifest.EntryMetadata {
+        return &manifest.EntryMetadata{
+            ID: "current-deets-msid-3",
+        }
+    }
+
     // Legacy backup with details in separate model.
     bupLegacy := func() *backup.Backup {
         return &backup.Backup{
@@ -261,9 +315,51 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
         return &res
     }
 
+    manifestWithReasons := func(
+        m *manifest.EntryMetadata,
+        tenantID string,
+        reasons ...identity.Reasoner,
+    ) *manifest.EntryMetadata {
+        res := *m
+
+        if res.Labels == nil {
+            res.Labels = map[string]string{}
+        }
+
+        res.Labels[kopiaPathLabel] = encodeAsPath(tenantID)
+
+        // Add the given Reasons.
+        for _, r := range reasons {
+            for _, k := range tagKeys(r) {
+                key, _ := makeTagKV(k)
+                res.Labels[key] = "0"
+            }
+        }
+
+        // Also add other labels commonly found on item data snapshots.
+        k, _ := makeTagKV(TagBackupCategory)
+        res.Labels[k] = "0"
+
+        return &res
+    }
+
     backupWithTime := func(mt time.Time, b *backup.Backup) *backup.Backup {
         res := *b
         res.ModTime = mt
+        res.CreationTime = mt
 
         return &res
     }
+
+    backupAssist := func(protectedResource string, b *backup.Backup) *backup.Backup {
+        res := *b
+        res.ProtectedResourceID = protectedResource
+
+        if res.Tags == nil {
+            res.Tags = map[string]string{}
+        }
+
+        res.Tags[model.BackupTypeTag] = model.AssistBackup
+
+        return &res
+    }
@@ -529,6 +625,232 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() {
             buffer:    24 * time.Hour,
             expectErr: assert.NoError,
         },
+        // Tests dealing with assist base cleanup.
+        {
+            // Test that even if we have multiple assist bases with the same
+            // Reason(s), none of them are garbage collected if they are within
+            // the buffer period used to exclude recently created backups from
+            // garbage collection.
+            name: "AssistBase NotYoungest InBufferTime Noops",
+            snapshots: []*manifest.EntryMetadata{
+                manifestWithReasons(
+                    manifestWithTime(baseTime, snapCurrent()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime, deetsCurrent()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Second), snapCurrent2()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Second), deetsCurrent2()),
+            },
+            backups: []backupRes{
+                {bup: backupAssist("ro", backupWithTime(baseTime, bupCurrent()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Second), bupCurrent2()))},
+            },
+            time:      baseTime,
+            buffer:    24 * time.Hour,
+            expectErr: assert.NoError,
+        },
+        {
+            // Test that an assist base that has the same Reasons as a newer
+            // assist base is garbage collected when it's outside the buffer
+            // period.
+            name: "AssistBases NotYoungest CausesCleanup",
+            snapshots: []*manifest.EntryMetadata{
+                manifestWithReasons(
+                    manifestWithTime(baseTime, snapCurrent()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime, deetsCurrent()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Second), snapCurrent2()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Second), deetsCurrent2()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Minute), snapCurrent3()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Minute), deetsCurrent3()),
+            },
+            backups: []backupRes{
+                {bup: backupAssist("ro", backupWithTime(baseTime, bupCurrent()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Second), bupCurrent2()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Minute), bupCurrent3()))},
+            },
+            expectDeleteIDs: []manifest.ID{
+                snapCurrent().ID,
+                deetsCurrent().ID,
+                manifest.ID(bupCurrent().ModelStoreID),
+                snapCurrent2().ID,
+                deetsCurrent2().ID,
+                manifest.ID(bupCurrent2().ModelStoreID),
+            },
+            time:      baseTime.Add(48 * time.Hour),
+            buffer:    24 * time.Hour,
+            expectErr: assert.NoError,
+        },
+        {
+            // Test that the most recent assist base is not garbage collected
+            // even if there's a newer merge base that has the same Reasons as
+            // the assist base. Also ensure assist bases with the same Reasons
+            // that are older than the newest assist base are still garbage
+            // collected.
+            name: "AssistBasesAndMergeBase NotYoungest CausesCleanupForAssistBase",
+            snapshots: []*manifest.EntryMetadata{
+                manifestWithReasons(
+                    manifestWithTime(baseTime, snapCurrent()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime, deetsCurrent()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Second), snapCurrent2()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Second), deetsCurrent2()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Minute), snapCurrent3()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Minute), deetsCurrent3()),
+            },
+            backups: []backupRes{
+                {bup: backupAssist("ro", backupWithTime(baseTime, bupCurrent()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Second), bupCurrent2()))},
+                {bup: backupWithTime(baseTime.Add(time.Minute), bupCurrent3())},
+            },
+            expectDeleteIDs: []manifest.ID{
+                snapCurrent().ID,
+                deetsCurrent().ID,
+                manifest.ID(bupCurrent().ModelStoreID),
+            },
+            time:      baseTime.Add(48 * time.Hour),
+            buffer:    24 * time.Hour,
+            expectErr: assert.NoError,
+        },
+        {
+            // Test that an assist base that is not the most recent for Reason A
+            // but is the most recent for Reason B is not garbage collected.
+            name: "AssistBases YoungestInOneReason Noops",
+            snapshots: []*manifest.EntryMetadata{
+                manifestWithReasons(
+                    manifestWithTime(baseTime, snapCurrent()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory),
+                    NewReason("", "ro", path.ExchangeService, path.ContactsCategory)),
+                manifestWithTime(baseTime, deetsCurrent()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Second), snapCurrent2()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Second), deetsCurrent2()),
+            },
+            backups: []backupRes{
+                {bup: backupAssist("ro", backupWithTime(baseTime, bupCurrent()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Second), bupCurrent2()))},
+            },
+            time:      baseTime.Add(48 * time.Hour),
+            buffer:    24 * time.Hour,
+            expectErr: assert.NoError,
+        },
+        {
+            // Test that assist bases that have the same tenant, service, and
+            // category but different protected resources are not garbage
+            // collected. This ensures the Reason is properly handled when
+            // finding the most recent assist base.
+            name: "AssistBases DifferentProtectedResources Noops",
+            snapshots: []*manifest.EntryMetadata{
+                manifestWithReasons(
+                    manifestWithTime(baseTime, snapCurrent()),
+                    "tenant1",
+                    NewReason("", "ro1", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime, deetsCurrent()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Second), snapCurrent2()),
+                    "tenant1",
+                    NewReason("", "ro2", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Second), deetsCurrent2()),
+            },
+            backups: []backupRes{
+                {bup: backupAssist("ro1", backupWithTime(baseTime, bupCurrent()))},
+                {bup: backupAssist("ro2", backupWithTime(baseTime.Add(time.Second), bupCurrent2()))},
+            },
+            time:      baseTime.Add(48 * time.Hour),
+            buffer:    24 * time.Hour,
+            expectErr: assert.NoError,
+        },
+        {
+            // Test that assist bases that have the same protected resource,
+            // service, and category but different tenants are not garbage
+            // collected. This ensures the Reason is properly handled when
+            // finding the most recent assist base.
+            name: "AssistBases DifferentTenants Noops",
+            snapshots: []*manifest.EntryMetadata{
+                manifestWithReasons(
+                    manifestWithTime(baseTime, snapCurrent()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime, deetsCurrent()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Second), snapCurrent2()),
+                    "tenant2",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Second), deetsCurrent2()),
+            },
+            backups: []backupRes{
+                {bup: backupAssist("ro", backupWithTime(baseTime, bupCurrent()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Second), bupCurrent2()))},
+            },
+            time:      baseTime.Add(48 * time.Hour),
+            buffer:    24 * time.Hour,
+            expectErr: assert.NoError,
+        },
+        {
+            // Test that if the tenant is not available for a given assist base,
+            // it's excluded from the garbage collection set. This behavior is
+            // conservative because it's quite likely that we could garbage
+            // collect the base without issue.
+            name: "AssistBases NoTenant SkipsBackup",
+            snapshots: []*manifest.EntryMetadata{
+                manifestWithReasons(
+                    manifestWithTime(baseTime, snapCurrent()),
+                    "",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime, deetsCurrent()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Second), snapCurrent2()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Second), deetsCurrent2()),
+
+                manifestWithReasons(
+                    manifestWithTime(baseTime.Add(time.Minute), snapCurrent3()),
+                    "tenant1",
+                    NewReason("", "ro", path.ExchangeService, path.EmailCategory)),
+                manifestWithTime(baseTime.Add(time.Minute), deetsCurrent3()),
+            },
+            backups: []backupRes{
+                {bup: backupAssist("ro", backupWithTime(baseTime, bupCurrent()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Second), bupCurrent2()))},
+                {bup: backupAssist("ro", backupWithTime(baseTime.Add(time.Minute), bupCurrent3()))},
+            },
+            time:   baseTime.Add(48 * time.Hour),
+            buffer: 24 * time.Hour,
+            expectDeleteIDs: []manifest.ID{
+                snapCurrent2().ID,
+                deetsCurrent2().ID,
+                manifest.ID(bupCurrent2().ModelStoreID),
+            },
+            expectErr: assert.NoError,
+        },
     }
 
     for _, test := range table {
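---

For reviewers: a minimal, self-contained sketch of the youngest-per-Reason selection that `collectOldAssistBases` implements. The `assistBase` type and the pre-joined reason strings are simplified stand-ins for the real corso models, and stdlib `sort.SliceStable` stands in for `slices.SortStableFunc`; this is the idea behind the change under stated assumptions, not the code under review.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// assistBase is a stand-in for the real backup model; it keeps only the
// fields the selection logic needs.
type assistBase struct {
	id      string
	reasons []string // pre-joined tenant+resource+service/category strings
	created time.Time
}

// oldAssistBases returns the IDs of every base that is not the youngest for
// any of its reasons, mirroring the delete-from-candidate-set approach.
func oldAssistBases(bases []assistBase) []string {
	maybeDelete := map[string]struct{}{}
	byReason := map[string][]assistBase{}

	// Start with every base as a delete candidate and group bases by reason.
	for _, b := range bases {
		maybeDelete[b.id] = struct{}{}

		for _, r := range b.reasons {
			byReason[r] = append(byReason[r], b)
		}
	}

	// Keep the youngest base per reason. A base that is youngest for even a
	// single reason survives: one delete from the candidate map is enough.
	for _, set := range byReason {
		sort.SliceStable(set, func(i, j int) bool {
			return set[i].created.After(set[j].created)
		})

		delete(maybeDelete, set[0].id)
	}

	res := make([]string, 0, len(maybeDelete))
	for id := range maybeDelete {
		res = append(res, id)
	}

	return res
}

func main() {
	t0 := time.Now()

	fmt.Println(oldAssistBases([]assistBase{
		{id: "old", reasons: []string{"t1|ro|sc-email"}, created: t0},
		{id: "new", reasons: []string{"t1|ro|sc-email"}, created: t0.Add(time.Minute)},
	}))
	// Prints: [old]
}
```

Running the example prints `[old]`: the older of two bases sharing a Reason is the only one selected for garbage collection.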
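Likewise, a hedged sketch of the label rewrite `transferTags` performs when copying Reason tags from an item data snapshot onto the backup model. The `"tag:"` value assumed for `userTagPrefix` and the sample labels below are fabricated for illustration; the real constant and label shapes live elsewhere in the kopia package.

```go
package main

import (
	"fmt"
	"strings"
)

const (
	userTagPrefix       = "tag:" // assumed value, for illustration only
	serviceCatTagPrefix = "sc-"
)

// normalizeReasonLabels copies Reason-like labels from a snapshot label set,
// rewriting the legacy user-tag prefix to the newer "sc-" form and dropping
// labels that are in the skip set or lack the expected prefix.
func normalizeReasonLabels(
	labels map[string]string,
	skip map[string]struct{},
) map[string]string {
	out := map[string]string{}

	for label := range labels {
		if _, ok := skip[label]; ok || !strings.HasPrefix(label, userTagPrefix) {
			continue
		}

		out[strings.Replace(label, userTagPrefix, serviceCatTagPrefix, 1)] = "0"
	}

	return out
}

func main() {
	labels := map[string]string{
		"tag:exchangeemail": "0",          // a Reason-style tag
		"tag:backup-id":     "0",          // known non-Reason tag, skipped
		"path":              "dGVuYW50MQ", // not user-tag prefixed, skipped
	}
	skip := map[string]struct{}{"tag:backup-id": {}}

	fmt.Println(normalizeReasonLabels(labels, skip))
	// Prints: map[sc-exchangeemail:0]
}
```

The design choice mirrored here is that only labels carrying the user-tag prefix survive, minus an explicit skip set, and survivors are re-prefixed with `sc-` so later code can recognize service/category Reasons by prefix alone.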