Refactor backup code to use BackupBases functions (#3596)

Now that BackupBases defines functions, leverage them
in other code to reduce the number of times we fetch
Backup models, and take advantage of the stronger
invariants the new FindBases function provides.

---

#### Does this PR need a docs update or release note?

- [ ]  Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x]  No

#### Type of change

- [ ] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [x] 🧹 Tech Debt/Cleanup

#### Issue(s)

* #3525

#### Test Plan

- [ ] 💪 Manual
- [x]  Unit test
- [ ] 💚 E2E
This commit is contained in:
ashmrtn 2023-06-15 10:43:18 -07:00 committed by GitHub
parent cfbed454ea
commit c0f428ddc8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 868 additions and 1630 deletions

View File

@ -249,6 +249,8 @@ func (b *baseFinder) findBasesInSet(
// If we've made it to this point then we're considering the backup // If we've made it to this point then we're considering the backup
// complete as it has both an item data snapshot and a backup details // complete as it has both an item data snapshot and a backup details
// snapshot. // snapshot.
logger.Ctx(ictx).Infow("found complete backup", "base_backup_id", bup.ID)
me := ManifestEntry{ me := ManifestEntry{
Manifest: man, Manifest: man,
Reasons: []Reason{reason}, Reasons: []Reason{reason},
@ -293,11 +295,11 @@ func (b *baseFinder) getBase(
return b.findBasesInSet(ctx, reason, metas) return b.findBasesInSet(ctx, reason, metas)
} }
func (b *baseFinder) findBases( func (b *baseFinder) FindBases(
ctx context.Context, ctx context.Context,
reasons []Reason, reasons []Reason,
tags map[string]string, tags map[string]string,
) (backupBases, error) { ) BackupBases {
var ( var (
// All maps go from ID -> entry. We need to track by ID so we can coalesce // All maps go from ID -> entry. We need to track by ID so we can coalesce
// the reason for selecting something. Kopia assisted snapshots also use // the reason for selecting something. Kopia assisted snapshots also use
@ -361,24 +363,13 @@ func (b *baseFinder) findBases(
} }
} }
return backupBases{ res := &backupBases{
backups: maps.Values(baseBups), backups: maps.Values(baseBups),
mergeBases: maps.Values(baseSnaps), mergeBases: maps.Values(baseSnaps),
assistBases: maps.Values(kopiaAssistSnaps), assistBases: maps.Values(kopiaAssistSnaps),
}, nil
} }
func (b *baseFinder) FindBases( res.fixupAndVerify(ctx)
ctx context.Context,
reasons []Reason,
tags map[string]string,
) ([]ManifestEntry, error) {
bb, err := b.findBases(ctx, reasons, tags)
if err != nil {
return nil, clues.Stack(err)
}
// assistBases contains all snapshots so we can return it while maintaining return res
// almost all compatibility.
return bb.assistBases, nil
} }

View File

@ -5,11 +5,9 @@ import (
"testing" "testing"
"time" "time"
"github.com/alcionai/clues"
"github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
@ -332,8 +330,7 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() {
}, },
} }
bb, err := bf.findBases(ctx, reasons, nil) bb := bf.FindBases(ctx, reasons, nil)
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
assert.Empty(t, bb.MergeBases()) assert.Empty(t, bb.MergeBases())
assert.Empty(t, bb.AssistBases()) assert.Empty(t, bb.AssistBases())
} }
@ -356,8 +353,7 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
}, },
} }
bb, err := bf.findBases(ctx, reasons, nil) bb := bf.FindBases(ctx, reasons, nil)
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
assert.Empty(t, bb.MergeBases()) assert.Empty(t, bb.MergeBases())
assert.Empty(t, bb.AssistBases()) assert.Empty(t, bb.AssistBases())
} }
@ -817,11 +813,10 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
bg: &mockModelGetter{data: test.backupData}, bg: &mockModelGetter{data: test.backupData},
} }
bb, err := bf.findBases( bb := bf.FindBases(
ctx, ctx,
test.input, test.input,
nil) nil)
require.NoError(t, err, "getting bases: %v", clues.ToCore(err))
checkBackupEntriesMatch( checkBackupEntriesMatch(
t, t,
@ -912,11 +907,10 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {
bg: &mockModelGetter{data: backupData}, bg: &mockModelGetter{data: backupData},
} }
bb, err := bf.findBases( bb := bf.FindBases(
ctx, ctx,
testAllUsersAllCats, testAllUsersAllCats,
test.tags) test.tags)
require.NoError(t, err, "getting bases: %v", clues.ToCore(err))
checkManifestEntriesMatch( checkManifestEntriesMatch(
t, t,

View File

@ -39,6 +39,6 @@ type (
ctx context.Context, ctx context.Context,
reasons []kopia.Reason, reasons []kopia.Reason,
tags map[string]string, tags map[string]string,
) ([]kopia.ManifestEntry, error) ) kopia.BackupBases
} }
) )

View File

@ -6,6 +6,7 @@ import (
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/kopia/kopia/repo/manifest"
"github.com/alcionai/corso/src/internal/common/crash" "github.com/alcionai/corso/src/internal/common/crash"
"github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/dttm"
@ -296,20 +297,10 @@ func (op *BackupOperation) do(
return nil, clues.Stack(err) return nil, clues.Stack(err)
} }
type baseFinder struct {
kinject.BaseFinder
kinject.RestoreProducer
}
bf := baseFinder{
BaseFinder: kbf,
RestoreProducer: op.kopia,
}
mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata( mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata(
ctx, ctx,
bf, kbf,
op.store, op.kopia,
reasons, fallbackReasons, reasons, fallbackReasons,
op.account.ID(), op.account.ID(),
op.incremental) op.incremental)
@ -318,10 +309,7 @@ func (op *BackupOperation) do(
} }
if canUseMetaData { if canUseMetaData {
_, lastBackupVersion, err = lastCompleteBackups(ctx, op.store, mans) lastBackupVersion = mans.MinBackupVersion()
if err != nil {
return nil, clues.Wrap(err, "retrieving prior backups")
}
} }
cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections( cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections(
@ -358,9 +346,8 @@ func (op *BackupOperation) do(
err = mergeDetails( err = mergeDetails(
ctx, ctx,
op.store,
detailsStore, detailsStore,
mans, mans.Backups(),
toMerge, toMerge,
deets, deets,
writeStats, writeStats,
@ -482,7 +469,7 @@ func consumeBackupCollections(
bc kinject.BackupConsumer, bc kinject.BackupConsumer,
tenantID string, tenantID string,
reasons []kopia.Reason, reasons []kopia.Reason,
mans []kopia.ManifestEntry, bbs kopia.BackupBases,
cs []data.BackupCollection, cs []data.BackupCollection,
pmr prefixmatcher.StringSetReader, pmr prefixmatcher.StringSetReader,
backupID model.StableID, backupID model.StableID,
@ -506,9 +493,24 @@ func consumeBackupCollections(
} }
} }
bases := make([]kopia.IncrementalBase, 0, len(mans)) // AssistBases should be the upper bound for how many snapshots we pass in.
bases := make([]kopia.IncrementalBase, 0, len(bbs.AssistBases()))
// Track IDs we've seen already so we don't accidentally duplicate some
// manifests. This can be removed when we move the code below into the kopia
// package.
ids := map[manifest.ID]struct{}{}
for _, m := range mans { var mb []kopia.ManifestEntry
if bbs != nil {
mb = bbs.MergeBases()
}
// TODO(ashmrtn): Make a wrapper for Reason that allows adding a tenant and
// make a function that will spit out a prefix that includes the tenant. With
// that done this code can be moved to kopia wrapper since it's really more
// specific to that.
for _, m := range mb {
paths := make([]*path.Builder, 0, len(m.Reasons)) paths := make([]*path.Builder, 0, len(m.Reasons))
services := map[string]struct{}{} services := map[string]struct{}{}
categories := map[string]struct{}{} categories := map[string]struct{}{}
@ -524,6 +526,8 @@ func consumeBackupCollections(
categories[reason.Category.String()] = struct{}{} categories[reason.Category.String()] = struct{}{}
} }
ids[m.ID] = struct{}{}
bases = append(bases, kopia.IncrementalBase{ bases = append(bases, kopia.IncrementalBase{
Manifest: m.Manifest, Manifest: m.Manifest,
SubtreePaths: paths, SubtreePaths: paths,
@ -552,6 +556,18 @@ func consumeBackupCollections(
"base_backup_id", mbID) "base_backup_id", mbID)
} }
// At the moment kopia assisted snapshots are in the same set as merge bases.
// When we fixup generating subtree paths we can remove this.
if bbs != nil {
for _, ab := range bbs.AssistBases() {
if _, ok := ids[ab.ID]; ok {
continue
}
bases = append(bases, kopia.IncrementalBase{Manifest: ab.Manifest})
}
}
kopiaStats, deets, itemsSourcedFromBase, err := bc.ConsumeBackupCollections( kopiaStats, deets, itemsSourcedFromBase, err := bc.ConsumeBackupCollections(
ctx, ctx,
bases, bases,
@ -663,61 +679,10 @@ func getNewPathRefs(
return newPath, newLoc, updated, nil return newPath, newLoc, updated, nil
} }
func lastCompleteBackups(
ctx context.Context,
ms *store.Wrapper,
mans []kopia.ManifestEntry,
) (map[string]*backup.Backup, int, error) {
var (
oldestVersion = version.NoBackup
result = map[string]*backup.Backup{}
)
if len(mans) == 0 {
return result, -1, nil
}
for _, man := range mans {
// For now skip snapshots that aren't complete. We will need to revisit this
// when we tackle restartability.
if len(man.IncompleteReason) > 0 {
continue
}
var (
mctx = clues.Add(ctx, "base_manifest_id", man.ID)
reasons = man.Reasons
)
bID, ok := man.GetTag(kopia.TagBackupID)
if !ok {
return result, oldestVersion, clues.New("no backup ID in snapshot manifest").WithClues(mctx)
}
mctx = clues.Add(mctx, "base_manifest_backup_id", bID)
bup, err := getBackupFromID(mctx, model.StableID(bID), ms)
if err != nil {
return result, oldestVersion, err
}
for _, r := range reasons {
result[r.Key()] = bup
}
if oldestVersion == -1 || bup.Version < oldestVersion {
oldestVersion = bup.Version
}
}
return result, oldestVersion, nil
}
func mergeDetails( func mergeDetails(
ctx context.Context, ctx context.Context,
ms *store.Wrapper,
detailsStore streamstore.Streamer, detailsStore streamstore.Streamer,
mans []kopia.ManifestEntry, backups []kopia.BackupEntry,
dataFromBackup kopia.DetailsMergeInfoer, dataFromBackup kopia.DetailsMergeInfoer,
deets *details.Builder, deets *details.Builder,
writeStats *kopia.BackupStats, writeStats *kopia.BackupStats,
@ -738,29 +703,15 @@ func mergeDetails(
var addedEntries int var addedEntries int
for _, man := range mans { for _, baseBackup := range backups {
var ( var (
mctx = clues.Add(ctx, "base_manifest_id", man.ID) mctx = clues.Add(ctx, "base_backup_id", baseBackup.ID)
manifestAddedEntries int manifestAddedEntries int
) )
// For now skip snapshots that aren't complete. We will need to revisit this baseDeets, err := getDetailsFromBackup(
// when we tackle restartability.
if len(man.IncompleteReason) > 0 {
continue
}
bID, ok := man.GetTag(kopia.TagBackupID)
if !ok {
return clues.New("no backup ID in snapshot manifest").WithClues(mctx)
}
mctx = clues.Add(mctx, "base_manifest_backup_id", bID)
baseBackup, baseDeets, err := getBackupAndDetailsFromID(
mctx, mctx,
model.StableID(bID), baseBackup.Backup,
ms,
detailsStore, detailsStore,
errs) errs)
if err != nil { if err != nil {
@ -781,7 +732,7 @@ func mergeDetails(
// //
// TODO(ashmrtn): This logic will need expanded to cover entries from // TODO(ashmrtn): This logic will need expanded to cover entries from
// checkpoints if we start doing kopia-assisted incrementals for those. // checkpoints if we start doing kopia-assisted incrementals for those.
if !matchesReason(man.Reasons, rr) { if !matchesReason(baseBackup.Reasons, rr) {
continue continue
} }

View File

@ -232,10 +232,8 @@ func checkBackupIsInManifests(
bf, err := kw.NewBaseFinder(bo.store) bf, err := kw.NewBaseFinder(bo.store)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
mans, err := bf.FindBases(ctx, reasons, tags) mans := bf.FindBases(ctx, reasons, tags)
require.NoError(t, err, clues.ToCore(err)) for _, man := range mans.MergeBases() {
for _, man := range mans {
bID, ok := man.GetTag(kopia.TagBackupID) bID, ok := man.GetTag(kopia.TagBackupID)
if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) { if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) {
continue continue

View File

@ -2,13 +2,11 @@ package operations
import ( import (
"context" "context"
"fmt"
stdpath "path" stdpath "path"
"testing" "testing"
"time" "time"
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -128,77 +126,6 @@ func (mbu mockBackupConsumer) ConsumeBackupCollections(
// ----- model store for backups // ----- model store for backups
type mockBackupStorer struct {
// Only using this to store backup models right now.
entries map[model.StableID]backup.Backup
}
func (mbs mockBackupStorer) Get(
ctx context.Context,
s model.Schema,
id model.StableID,
toPopulate model.Model,
) error {
ctx = clues.Add(
ctx,
"model_schema", s,
"model_id", id,
"model_type", fmt.Sprintf("%T", toPopulate))
if s != model.BackupSchema {
return clues.New("unexpected schema").WithClues(ctx)
}
r, ok := mbs.entries[id]
if !ok {
return clues.New("model not found").WithClues(ctx)
}
bu, ok := toPopulate.(*backup.Backup)
if !ok {
return clues.New("bad population type").WithClues(ctx)
}
*bu = r
return nil
}
func (mbs mockBackupStorer) Delete(context.Context, model.Schema, model.StableID) error {
return clues.New("not implemented")
}
func (mbs mockBackupStorer) DeleteWithModelStoreID(context.Context, manifest.ID) error {
return clues.New("not implemented")
}
func (mbs mockBackupStorer) GetIDsForType(
context.Context,
model.Schema,
map[string]string,
) ([]*model.BaseModel, error) {
return nil, clues.New("not implemented")
}
func (mbs mockBackupStorer) GetWithModelStoreID(
context.Context,
model.Schema,
manifest.ID,
model.Model,
) error {
return clues.New("not implemented")
}
func (mbs mockBackupStorer) Put(context.Context, model.Schema, model.Model) error {
return clues.New("not implemented")
}
func (mbs mockBackupStorer) Update(context.Context, model.Schema, model.Model) error {
return clues.New("not implemented")
}
// ----- model store for backups
type mockDetailsMergeInfoer struct { type mockDetailsMergeInfoer struct {
repoRefs map[string]path.Path repoRefs map[string]path.Path
locs map[string]*path.Builder locs map[string]*path.Builder
@ -260,27 +187,6 @@ func makeMetadataBasePath(
return p return p
} }
func makeMetadataPath(
t *testing.T,
tenant string,
service path.ServiceType,
resourceOwner string,
category path.CategoryType,
fileName string,
) path.Path {
t.Helper()
p, err := path.Builder{}.Append(fileName).ToServiceCategoryMetadataPath(
tenant,
resourceOwner,
service,
category,
true)
require.NoError(t, err, clues.ToCore(err))
return p
}
func makeFolderEntry( func makeFolderEntry(
t *testing.T, t *testing.T,
pb, loc *path.Builder, pb, loc *path.Builder,
@ -379,25 +285,6 @@ func makeDetailsEntry(
return res return res
} }
// TODO(ashmrtn): This should belong to some code that lives in the kopia
// package that is only compiled when running tests.
func makeKopiaTagKey(k string) string {
return "tag:" + k
}
func makeManifest(t *testing.T, backupID model.StableID, incompleteReason string) *snapshot.Manifest {
t.Helper()
tagKey := makeKopiaTagKey(kopia.TagBackupID)
return &snapshot.Manifest{
Tags: map[string]string{
tagKey: string(backupID),
},
IncompleteReason: incompleteReason,
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// unit tests // unit tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -533,19 +420,19 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
table := []struct { table := []struct {
name string name string
inputMan []kopia.ManifestEntry // Backup model is untouched in this test so there's no need to populate it.
input kopia.BackupBases
expected []kopia.IncrementalBase expected []kopia.IncrementalBase
}{ }{
{ {
name: "SingleManifestSingleReason", name: "SingleManifestSingleReason",
inputMan: []kopia.ManifestEntry{ input: kopia.NewMockBackupBases().WithMergeBases(
{ kopia.ManifestEntry{
Manifest: manifest1, Manifest: manifest1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
emailReason, emailReason,
}, },
}, }).ClearMockAssistBases(),
},
expected: []kopia.IncrementalBase{ expected: []kopia.IncrementalBase{
{ {
Manifest: manifest1, Manifest: manifest1,
@ -557,15 +444,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
}, },
{ {
name: "SingleManifestMultipleReasons", name: "SingleManifestMultipleReasons",
inputMan: []kopia.ManifestEntry{ input: kopia.NewMockBackupBases().WithMergeBases(
{ kopia.ManifestEntry{
Manifest: manifest1, Manifest: manifest1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
emailReason, emailReason,
contactsReason, contactsReason,
}, },
}, }).ClearMockAssistBases(),
},
expected: []kopia.IncrementalBase{ expected: []kopia.IncrementalBase{
{ {
Manifest: manifest1, Manifest: manifest1,
@ -578,22 +464,21 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
}, },
{ {
name: "MultipleManifestsMultipleReasons", name: "MultipleManifestsMultipleReasons",
inputMan: []kopia.ManifestEntry{ input: kopia.NewMockBackupBases().WithMergeBases(
{ kopia.ManifestEntry{
Manifest: manifest1, Manifest: manifest1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
emailReason, emailReason,
contactsReason, contactsReason,
}, },
}, },
{ kopia.ManifestEntry{
Manifest: manifest2, Manifest: manifest2,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
emailReason, emailReason,
contactsReason, contactsReason,
}, },
}, }).ClearMockAssistBases(),
},
expected: []kopia.IncrementalBase{ expected: []kopia.IncrementalBase{
{ {
Manifest: manifest1, Manifest: manifest1,
@ -611,6 +496,33 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
}, },
}, },
}, },
{
name: "Single Manifest Single Reason With Assist Base",
input: kopia.NewMockBackupBases().WithMergeBases(
kopia.ManifestEntry{
Manifest: manifest1,
Reasons: []kopia.Reason{
emailReason,
},
}).WithAssistBases(
kopia.ManifestEntry{
Manifest: manifest2,
Reasons: []kopia.Reason{
contactsReason,
},
}),
expected: []kopia.IncrementalBase{
{
Manifest: manifest1,
SubtreePaths: []*path.Builder{
emailBuilder,
},
},
{
Manifest: manifest2,
},
},
},
} }
for _, test := range table { for _, test := range table {
@ -637,7 +549,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
mbu, mbu,
tenant, tenant,
nil, nil,
test.inputMan, test.input,
nil, nil,
nil, nil,
model.StableID(""), model.StableID(""),
@ -731,9 +643,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
table := []struct { table := []struct {
name string name string
populatedModels map[model.StableID]backup.Backup
populatedDetails map[string]*details.Details populatedDetails map[string]*details.Details
inputMans []kopia.ManifestEntry inputBackups []kopia.BackupEntry
mdm *mockDetailsMergeInfoer mdm *mockDetailsMergeInfoer
errCheck assert.ErrorAssertionFunc errCheck assert.ErrorAssertionFunc
@ -752,24 +663,6 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
// Use empty slice so we don't error out on nil != empty. // Use empty slice so we don't error out on nil != empty.
expectedEntries: []*details.Entry{}, expectedEntries: []*details.Entry{},
}, },
{
name: "BackupIDNotFound",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.add(itemPath1, itemPath1, locationPath1)
return res
}(),
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), "foo", ""),
Reasons: []kopia.Reason{
pathReason1,
},
},
},
errCheck: assert.Error,
},
{ {
name: "DetailsIDNotFound", name: "DetailsIDNotFound",
mdm: func() *mockDetailsMergeInfoer { mdm: func() *mockDetailsMergeInfoer {
@ -778,21 +671,18 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup.Backup{
Reasons: []kopia.Reason{
pathReason1,
},
},
},
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: {
BaseModel: model.BaseModel{ BaseModel: model.BaseModel{
ID: backup1.ID, ID: backup1.ID,
}, },
DetailsID: "foo", DetailsID: "foo",
}, },
Reasons: []kopia.Reason{
pathReason1,
},
},
}, },
errCheck: assert.Error, errCheck: assert.Error,
}, },
@ -805,17 +695,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -835,23 +722,20 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -871,17 +755,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -933,17 +814,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -963,17 +841,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -996,17 +871,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -1029,17 +901,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -1063,17 +932,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -1097,24 +963,20 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []kopia.ManifestEntry{ inputBackups: []kopia.BackupEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Backup: &backup1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
}, },
{ {
Manifest: makeManifest(suite.T(), backup2.ID, ""), Backup: &backup2,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason3, pathReason3,
}, },
}, },
}, },
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
backup2.ID: backup2,
},
populatedDetails: map[string]*details.Details{ populatedDetails: map[string]*details.Details{
backup1.DetailsID: { backup1.DetailsID: {
DetailsModel: details.DetailsModel{ DetailsModel: details.DetailsModel{
@ -1140,54 +1002,6 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
makeDetailsEntry(suite.T(), itemPath3, locationPath3, 37, false), makeDetailsEntry(suite.T(), itemPath3, locationPath3, 37, false),
}, },
}, },
{
name: "SomeBasesIncomplete",
mdm: func() *mockDetailsMergeInfoer {
res := newMockDetailsMergeInfoer()
res.add(itemPath1, itemPath1, locationPath1)
return res
}(),
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
pathReason1,
},
},
{
Manifest: makeManifest(suite.T(), backup2.ID, "checkpoint"),
Reasons: []kopia.Reason{
pathReason1,
},
},
},
populatedModels: map[model.StableID]backup.Backup{
backup1.ID: backup1,
backup2.ID: backup2,
},
populatedDetails: map[string]*details.Details{
backup1.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
*makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false),
},
},
},
backup2.DetailsID: {
DetailsModel: details.DetailsModel{
Entries: []details.Entry{
// This entry should not be picked due to being incomplete.
*makeDetailsEntry(suite.T(), itemPath1, locationPath1, 84, false),
},
},
},
},
errCheck: assert.NoError,
expectedEntries: []*details.Entry{
makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false),
},
},
} }
for _, test := range table { for _, test := range table {
@ -1198,15 +1012,13 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
defer flush() defer flush()
mds := ssmock.Streamer{Deets: test.populatedDetails} mds := ssmock.Streamer{Deets: test.populatedDetails}
w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}}
deets := details.Builder{} deets := details.Builder{}
writeStats := kopia.BackupStats{} writeStats := kopia.BackupStats{}
err := mergeDetails( err := mergeDetails(
ctx, ctx,
w,
mds, mds,
test.inputMans, test.inputBackups,
test.mdm, test.mdm,
&deets, &deets,
&writeStats, &writeStats,
@ -1247,30 +1059,22 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
locPath1 = path.Builder{}.Append(itemPath1.Folders()...) locPath1 = path.Builder{}.Append(itemPath1.Folders()...)
backup1 = backup.Backup{
BaseModel: model.BaseModel{
ID: "bid1",
},
DetailsID: "did1",
}
pathReason1 = kopia.Reason{ pathReason1 = kopia.Reason{
ResourceOwner: itemPath1.ResourceOwner(), ResourceOwner: itemPath1.ResourceOwner(),
Service: itemPath1.Service(), Service: itemPath1.Service(),
Category: itemPath1.Category(), Category: itemPath1.Category(),
} }
inputMans = []kopia.ManifestEntry{ backup1 = kopia.BackupEntry{
{ Backup: &backup.Backup{
Manifest: makeManifest(t, backup1.ID, ""), BaseModel: model.BaseModel{
ID: "bid1",
},
DetailsID: "did1",
},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
pathReason1, pathReason1,
}, },
},
}
populatedModels = map[model.StableID]backup.Backup{
backup1.ID: backup1,
} }
itemSize = 42 itemSize = 42
@ -1313,16 +1117,14 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
var ( var (
mds = ssmock.Streamer{Deets: populatedDetails} mds = ssmock.Streamer{Deets: populatedDetails}
w = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
deets = details.Builder{} deets = details.Builder{}
writeStats = kopia.BackupStats{} writeStats = kopia.BackupStats{}
) )
err := mergeDetails( err := mergeDetails(
ctx, ctx,
w,
mds, mds,
inputMans, []kopia.BackupEntry{backup1},
mdm, mdm,
&deets, &deets,
&writeStats, &writeStats,

View File

@ -13,19 +13,6 @@ import (
"github.com/alcionai/corso/src/pkg/store" "github.com/alcionai/corso/src/pkg/store"
) )
func getBackupFromID(
ctx context.Context,
backupID model.StableID,
ms *store.Wrapper,
) (*backup.Backup, error) {
bup, err := ms.GetBackup(ctx, backupID)
if err != nil {
return nil, clues.Wrap(err, "getting backup")
}
return bup, nil
}
func getBackupAndDetailsFromID( func getBackupAndDetailsFromID(
ctx context.Context, ctx context.Context,
backupID model.StableID, backupID model.StableID,
@ -38,6 +25,20 @@ func getBackupAndDetailsFromID(
return nil, nil, clues.Wrap(err, "getting backup") return nil, nil, clues.Wrap(err, "getting backup")
} }
deets, err := getDetailsFromBackup(ctx, bup, detailsStore, errs)
if err != nil {
return nil, nil, clues.Stack(err)
}
return bup, deets, nil
}
func getDetailsFromBackup(
ctx context.Context,
bup *backup.Backup,
detailsStore streamstore.Reader,
errs *fault.Bus,
) (*details.Details, error) {
var ( var (
deets details.Details deets details.Details
umt = streamstore.DetailsReader(details.UnmarshalTo(&deets)) umt = streamstore.DetailsReader(details.UnmarshalTo(&deets))
@ -49,12 +50,12 @@ func getBackupAndDetailsFromID(
} }
if len(ssid) == 0 { if len(ssid) == 0 {
return bup, nil, clues.New("no details or errors in backup").WithClues(ctx) return nil, clues.New("no details or errors in backup").WithClues(ctx)
} }
if err := detailsStore.Read(ctx, ssid, umt, errs); err != nil { if err := detailsStore.Read(ctx, ssid, umt, errs); err != nil {
return nil, nil, clues.Wrap(err, "reading backup data from streamstore") return nil, clues.Wrap(err, "reading backup data from streamstore")
} }
return bup, &deets, nil return &deets, nil
} }

View File

@ -4,74 +4,39 @@ import (
"context" "context"
"github.com/alcionai/clues" "github.com/alcionai/clues"
"github.com/kopia/kopia/repo/manifest"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/kopia/inject" "github.com/alcionai/corso/src/internal/kopia/inject"
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
) )
type manifestRestorer interface {
inject.BaseFinder
inject.RestoreProducer
}
type getBackuper interface {
GetBackup(
ctx context.Context,
backupID model.StableID,
) (*backup.Backup, error)
}
// calls kopia to retrieve prior backup manifests, metadata collections to supply backup heuristics. // calls kopia to retrieve prior backup manifests, metadata collections to supply backup heuristics.
// TODO(ashmrtn): Make this a helper function that always returns as much as
// possible and call in another function that drops metadata and/or
// kopia-assisted incremental bases based on flag values.
func produceManifestsAndMetadata( func produceManifestsAndMetadata(
ctx context.Context, ctx context.Context,
mr manifestRestorer, bf inject.BaseFinder,
gb getBackuper, rp inject.RestoreProducer,
reasons, fallbackReasons []kopia.Reason, reasons, fallbackReasons []kopia.Reason,
tenantID string, tenantID string,
getMetadata bool, getMetadata bool,
) ([]kopia.ManifestEntry, []data.RestoreCollection, bool, error) { ) (kopia.BackupBases, []data.RestoreCollection, bool, error) {
var ( var (
tags = map[string]string{kopia.TagBackupCategory: ""} tags = map[string]string{kopia.TagBackupCategory: ""}
metadataFiles = graph.AllMetadataFileNames() metadataFiles = graph.AllMetadataFileNames()
collections []data.RestoreCollection collections []data.RestoreCollection
) )
ms, err := mr.FindBases(ctx, reasons, tags) bb := bf.FindBases(ctx, reasons, tags)
if err != nil { // TODO(ashmrtn): Only fetch these if we haven't already covered all the
return nil, nil, false, clues.Wrap(err, "looking up prior snapshots") // reasons for this backup.
} fbb := bf.FindBases(ctx, fallbackReasons, tags)
// We only need to check that we have 1:1 reason:base if we're doing an
// incremental with associated metadata. This ensures that we're only sourcing
// data from a single Point-In-Time (base) for each incremental backup.
//
// TODO(ashmrtn): This may need updating if we start sourcing item backup
// details from previous snapshots when using kopia-assisted incrementals.
if err := verifyDistinctBases(ctx, ms); err != nil {
logger.CtxErr(ctx, err).Info("base snapshot collision, falling back to full backup")
return ms, nil, false, nil
}
fbms, err := mr.FindBases(ctx, fallbackReasons, tags)
if err != nil {
return nil, nil, false, clues.Wrap(err, "looking up prior snapshots under alternate id")
}
// Also check distinct bases for the fallback set.
if err := verifyDistinctBases(ctx, fbms); err != nil {
logger.CtxErr(ctx, err).Info("fallback snapshot collision, falling back to full backup")
return ms, nil, false, nil
}
// one of three cases can occur when retrieving backups across reason migrations: // one of three cases can occur when retrieving backups across reason migrations:
// 1. the current reasons don't match any manifests, and we use the fallback to // 1. the current reasons don't match any manifests, and we use the fallback to
@ -79,56 +44,26 @@ func produceManifestsAndMetadata(
// 2. the current reasons only contain an incomplete manifest, and the fallback // 2. the current reasons only contain an incomplete manifest, and the fallback
// can find a complete manifest. // can find a complete manifest.
// 3. the current reasons contain all the necessary manifests. // 3. the current reasons contain all the necessary manifests.
ms = unionManifests(reasons, ms, fbms) bb = bb.MergeBackupBases(
ctx,
fbb,
func(r kopia.Reason) string {
return r.Service.String() + r.Category.String()
})
if !getMetadata { if !getMetadata {
return ms, nil, false, nil logger.Ctx(ctx).Debug("full backup requested, dropping merge bases")
}
// TODO(ashmrtn): If this function is moved to be a helper function then
for _, man := range ms { // move this change to the bases to the caller of this function.
if len(man.IncompleteReason) > 0 { bb.ClearMergeBases()
continue
return bb, nil, false, nil
} }
for _, man := range bb.MergeBases() {
mctx := clues.Add(ctx, "manifest_id", man.ID) mctx := clues.Add(ctx, "manifest_id", man.ID)
bID, ok := man.GetTag(kopia.TagBackupID)
if !ok {
err = clues.New("snapshot manifest missing backup ID").WithClues(ctx)
return nil, nil, false, err
}
mctx = clues.Add(mctx, "manifest_backup_id", bID)
bup, err := gb.GetBackup(mctx, model.StableID(bID))
// if no backup exists for any of the complete manifests, we want
// to fall back to a complete backup.
if errors.Is(err, data.ErrNotFound) {
logger.Ctx(mctx).Infow("backup missing, falling back to full backup", clues.In(mctx).Slice()...)
return ms, nil, false, nil
}
if err != nil {
return nil, nil, false, clues.Wrap(err, "retrieving prior backup data")
}
ssid := bup.StreamStoreID
if len(ssid) == 0 {
ssid = bup.DetailsID
}
mctx = clues.Add(mctx, "manifest_streamstore_id", ssid)
// if no detailsID exists for any of the complete manifests, we want
// to fall back to a complete backup. This is a temporary prevention
// mechanism to keep backups from falling into a perpetually bad state.
// This makes an assumption that the ID points to a populated set of
// details; we aren't doing the work to look them up.
if len(ssid) == 0 {
logger.Ctx(ctx).Infow("backup missing streamstore ID, falling back to full backup", clues.In(mctx).Slice()...)
return ms, nil, false, nil
}
// a local fault.Bus intance is used to collect metadata files here. // a local fault.Bus intance is used to collect metadata files here.
// we avoid the global fault.Bus because all failures here are ignorable, // we avoid the global fault.Bus because all failures here are ignorable,
// and cascading errors up to the operation can cause a conflict that forces // and cascading errors up to the operation can cause a conflict that forces
@ -137,9 +72,19 @@ func produceManifestsAndMetadata(
// spread around. Need to find more idiomatic handling. // spread around. Need to find more idiomatic handling.
fb := fault.New(true) fb := fault.New(true)
colls, err := collectMetadata(mctx, mr, man, metadataFiles, tenantID, fb) colls, err := collectMetadata(mctx, rp, man, metadataFiles, tenantID, fb)
LogFaultErrors(ctx, fb.Errors(), "collecting metadata") LogFaultErrors(ctx, fb.Errors(), "collecting metadata")
// TODO(ashmrtn): It should be alright to relax this condition a little. We
// should be able to just remove the offending manifest and backup from the
// set of bases. Since we're looking at manifests in this loop, it should be
// possible to find the backup by either checking the reasons or extracting
// the backup ID from the manifests tags.
//
// Assuming that only the corso metadata is corrupted for the manifest, it
// should be safe to leave this manifest in the AssistBases set, though we
// could remove it there too if we want to be conservative. That can be done
// by finding the manifest ID.
if err != nil && !errors.Is(err, data.ErrNotFound) { if err != nil && !errors.Is(err, data.ErrNotFound) {
// prior metadata isn't guaranteed to exist. // prior metadata isn't guaranteed to exist.
// if it doesn't, we'll just have to do a // if it doesn't, we'll just have to do a
@ -150,148 +95,7 @@ func produceManifestsAndMetadata(
collections = append(collections, colls...) collections = append(collections, colls...)
} }
if err != nil { return bb, collections, true, nil
return nil, nil, false, err
}
return ms, collections, true, nil
}
// unionManifests reduces the two manifest slices into a single slice.
// Assumes fallback represents a prior manifest version (across some migration
// that disrupts manifest lookup), and that mans contains the current version.
// Also assumes the mans slice will have, at most, one complete and one incomplete
// manifest per service+category tuple.
//
// Selection priority, for each reason, follows these rules:
// 1. If the mans manifest is complete, ignore fallback manifests for that reason.
// 2. If the mans manifest is only incomplete, look for a matching complete manifest in fallbacks.
// 3. If mans has no entry for a reason, look for both complete and incomplete fallbacks.
func unionManifests(
reasons []kopia.Reason,
mans []kopia.ManifestEntry,
fallback []kopia.ManifestEntry,
) []kopia.ManifestEntry {
if len(fallback) == 0 {
return mans
}
if len(mans) == 0 {
return fallback
}
type manTup struct {
complete *kopia.ManifestEntry
incomplete *kopia.ManifestEntry
}
tups := map[string]manTup{}
for _, r := range reasons {
// no resource owner in the key. Assume it's the same owner across all
// manifests, but that the identifier is different due to migration.
k := r.Service.String() + r.Category.String()
tups[k] = manTup{}
}
// track the manifests that were collected with the current lookup
for i := range mans {
m := &mans[i]
for _, r := range m.Reasons {
k := r.Service.String() + r.Category.String()
t := tups[k]
// assume mans will have, at most, one complete and one incomplete per key
if len(m.IncompleteReason) > 0 {
t.incomplete = m
} else {
t.complete = m
}
tups[k] = t
}
}
// backfill from the fallback where necessary
for i := range fallback {
m := &fallback[i]
useReasons := []kopia.Reason{}
for _, r := range m.Reasons {
k := r.Service.String() + r.Category.String()
t := tups[k]
if t.complete != nil {
// assume fallbacks contains prior manifest versions.
// we don't want to stack a prior version incomplete onto
// a current version's complete snapshot.
continue
}
useReasons = append(useReasons, r)
if len(m.IncompleteReason) > 0 && t.incomplete == nil {
t.incomplete = m
} else if len(m.IncompleteReason) == 0 {
t.complete = m
}
tups[k] = t
}
if len(m.IncompleteReason) == 0 && len(useReasons) > 0 {
m.Reasons = useReasons
}
}
// collect the results into a single slice of manifests
ms := map[string]kopia.ManifestEntry{}
for _, m := range tups {
if m.complete != nil {
ms[string(m.complete.ID)] = *m.complete
}
if m.incomplete != nil {
ms[string(m.incomplete.ID)] = *m.incomplete
}
}
return maps.Values(ms)
}
// verifyDistinctBases is a validation checker that ensures, for a given slice
// of manifests, that each manifest's Reason (owner, service, category) is only
// included once. If a reason is duplicated by any two manifests, an error is
// returned.
func verifyDistinctBases(ctx context.Context, mans []kopia.ManifestEntry) error {
reasons := map[string]manifest.ID{}
for _, man := range mans {
// Incomplete snapshots are used only for kopia-assisted incrementals. The
// fact that we need this check here makes it seem like this should live in
// the kopia code. However, keeping it here allows for better debugging as
// the kopia code only has access to a path builder which means it cannot
// remove the resource owner from the error/log output. That is also below
// the point where we decide if we should do a full backup or an incremental.
if len(man.IncompleteReason) > 0 {
continue
}
for _, reason := range man.Reasons {
reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String()
if b, ok := reasons[reasonKey]; ok {
return clues.New("manifests have overlapping reasons").
WithClues(ctx).
With("other_manifest_id", b)
}
reasons[reasonKey] = man.ID
}
}
return nil
} }
// collectMetadata retrieves all metadata files associated with the manifest. // collectMetadata retrieves all metadata files associated with the manifest.

File diff suppressed because it is too large Load Diff