Combine backup details merging structs (#3056)

Combine the sets of information used for merging
backup details so there are fewer things to pass
around and we can hide the function used to generate
lookup keys.

This changes the prefix matcher to act like a regular
map (exact match) for the moment, though the OneDrive
LocationRef PRs will update that.
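
For reviewers, here is a minimal, self-contained sketch of the consolidated shape. Plain strings stand in for `path.Path`/`path.Builder`, the exact-match map stands in for the prefix matcher, and the names mirror the diff below; this is illustrative, not the committed code:

```go
package main

import (
	"errors"
	"fmt"
)

// mergeDetails models the struct this PR introduces: one place that holds
// both the RepoRef remappings and the (for now exact-match) location
// remappings, so callers pass a single value around.
type mergeDetails struct {
	repoRefs  map[string]string // lookup key -> new RepoRef
	locations map[string]string // old location -> new location
}

func newMergeDetails() *mergeDetails {
	return &mergeDetails{
		repoRefs:  map[string]string{},
		locations: map[string]string{},
	}
}

// addRepoRef hides key generation behind the struct; callers never build
// lookup keys themselves (the real code uses ShortRef for this).
func (m *mergeDetails) addRepoRef(oldRef, newRef string) error {
	if _, ok := m.repoRefs[oldRef]; ok {
		return errors.New("duplicate RepoRef: " + oldRef)
	}

	m.repoRefs[oldRef] = newRef

	return nil
}

// itemsToMerge is what lets callers skip loading base details entirely
// when nothing was sourced from a base snapshot.
func (m *mergeDetails) itemsToMerge() int {
	if m == nil {
		return 0
	}

	return len(m.repoRefs)
}

func main() {
	dm := newMergeDetails()

	if err := dm.addRepoRef(
		"tenant/exchange/user/email/folder1/item1",
		"tenant/exchange/user/email/folder2/item1"); err != nil {
		fmt.Println(err)
	}

	fmt.Println(dm.itemsToMerge()) // 1

	// Re-adding the same old ref is rejected, matching addRepoRef in the diff.
	err := dm.addRepoRef("tenant/exchange/user/email/folder1/item1", "elsewhere")
	fmt.Println(err) // duplicate RepoRef: ...
}
```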

---

#### Does this PR need a docs update or release note?

- [ ]  Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x]  No

#### Type of change

- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* #2486

#### Test Plan

- [x] 💪 Manual
- [x]  Unit test
- [ ] 💚 E2E
ashmrtn 2023-04-11 20:08:40 -07:00 committed by GitHub
parent 8867b63ccb
commit 52e627189a
10 changed files with 382 additions and 393 deletions

View File

@@ -7,11 +7,66 @@ import (
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
-type LocationPrefixMatcher struct {
+type DetailsMergeInfoer interface {
+	// ItemsToMerge returns the number of items that need to be merged.
+	ItemsToMerge() int
+	// GetNewRepoRef takes the path of the old location of the item and returns
+	// its new RepoRef if the item needs to be merged. If the item doesn't need
+	// to be merged it returns nil.
+	GetNewRepoRef(oldRef *path.Builder) path.Path
+	// GetNewLocation takes the path of the folder containing the item and
+	// returns the location of the folder containing the item if it was
+	// updated. Otherwise it returns nil.
+	GetNewLocation(oldRef *path.Builder) *path.Builder
+}
+
+type mergeDetails struct {
+	repoRefs  map[string]path.Path
+	locations *locationPrefixMatcher
+}
+
+func (m *mergeDetails) ItemsToMerge() int {
+	if m == nil {
+		return 0
+	}
+
+	return len(m.repoRefs)
+}
+
+func (m *mergeDetails) addRepoRef(oldRef *path.Builder, newRef path.Path) error {
+	if _, ok := m.repoRefs[oldRef.ShortRef()]; ok {
+		return clues.New("duplicate RepoRef").With("repo_ref", oldRef.String())
+	}
+
+	m.repoRefs[oldRef.ShortRef()] = newRef
+
+	return nil
+}
+
+func (m *mergeDetails) GetNewRepoRef(oldRef *path.Builder) path.Path {
+	return m.repoRefs[oldRef.ShortRef()]
+}
+
+func (m *mergeDetails) addLocation(oldRef, newLoc *path.Builder) error {
+	return m.locations.add(oldRef, newLoc)
+}
+
+func (m *mergeDetails) GetNewLocation(oldRef *path.Builder) *path.Builder {
+	return m.locations.longestPrefix(oldRef.String())
+}
+
+func newMergeDetails() *mergeDetails {
+	return &mergeDetails{
+		repoRefs:  map[string]path.Path{},
+		locations: newLocationPrefixMatcher(),
+	}
+}
+
+type locationPrefixMatcher struct {
 	m prefixmatcher.Matcher[*path.Builder]
 }
 
-func (m *LocationPrefixMatcher) Add(oldRef path.Path, newLoc *path.Builder) error {
+func (m *locationPrefixMatcher) add(oldRef, newLoc *path.Builder) error {
 	if _, ok := m.m.Get(oldRef.String()); ok {
 		return clues.New("RepoRef already in matcher").With("repo_ref", oldRef)
 	}
@@ -21,7 +76,7 @@ func (m *LocationPrefixMatcher) Add(oldRef path.Path, newLoc *path.Builder) error {
 	return nil
 }
 
-func (m *LocationPrefixMatcher) LongestPrefix(oldRef string) *path.Builder {
+func (m *locationPrefixMatcher) longestPrefix(oldRef string) *path.Builder {
 	if m == nil {
 		return nil
 	}
@@ -36,6 +91,6 @@ func (m *LocationPrefixMatcher) LongestPrefix(oldRef string) *path.Builder {
 	return v
 }
 
-func NewLocationPrefixMatcher() *LocationPrefixMatcher {
-	return &LocationPrefixMatcher{m: prefixmatcher.NewMatcher[*path.Builder]()}
+func newLocationPrefixMatcher() *locationPrefixMatcher {
+	return &locationPrefixMatcher{m: prefixmatcher.NewMatcher[*path.Builder]()}
 }
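
To make the `longestPrefix` contract concrete: once the OneDrive LocationRef PRs land, it should map any folder under a moved tree to that tree's new location, even though this PR temporarily behaves like an exact-match map. A standalone sketch of that eventual lookup over plain strings (illustrative only, not the committed implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// longestPrefix returns the mapped value for the longest key that is a
// path prefix of ref, mimicking what locationPrefixMatcher is meant to do
// once prefix matching is wired up.
func longestPrefix(m map[string]string, ref string) (string, bool) {
	best, found := "", false

	for oldPrefix := range m {
		if (ref == oldPrefix || strings.HasPrefix(ref, oldPrefix+"/")) &&
			len(oldPrefix) > len(best) {
			best, found = oldPrefix, true
		}
	}

	if !found {
		return "", false
	}

	return m[best], true
}

func main() {
	moved := map[string]string{
		"tenant/exchange/user/email/folder1": "tenant/exchange/user/email/renamed",
	}

	// A nested folder that produced no delta result still resolves to the
	// moved parent's new location.
	loc, ok := longestPrefix(moved, "tenant/exchange/user/email/folder1/nested")
	fmt.Println(loc, ok) // tenant/exchange/user/email/renamed true

	loc, ok = longestPrefix(moved, "tenant/exchange/user/email/other")
	fmt.Println(loc, ok) // "" false
}
```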

View File

@@ -1,4 +1,4 @@
-package kopia_test
+package kopia
 
 import (
 	"testing"
@@ -8,33 +8,58 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
-	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
-var (
-	testTenant = "a-tenant"
-	testUser   = "a-user"
-	service    = path.ExchangeService
-	category   = path.EmailCategory
-)
-
-type LocationPrefixMatcherUnitSuite struct {
+type DetailsMergeInfoerUnitSuite struct {
 	tester.Suite
 }
 
-func makePath(
-	t *testing.T,
-	service path.ServiceType,
-	category path.CategoryType,
-	tenant, user string,
-	folders []string,
-) path.Path {
-	p, err := path.Build(tenant, user, service, category, false, folders...)
+func TestDetailsMergeInfoerUnitSuite(t *testing.T) {
+	suite.Run(t, &DetailsMergeInfoerUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+// TestRepoRefs is a basic sanity test to ensure lookups are working properly
+// for stored RepoRefs.
+func (suite *DetailsMergeInfoerUnitSuite) TestRepoRefs() {
+	t := suite.T()
+
+	oldRef := makePath(
+		t,
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder1",
+		},
+		false).ToBuilder()
+	newRef := makePath(
+		t,
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder2",
+		},
+		false)
+
+	dm := newMergeDetails()
+
+	err := dm.addRepoRef(oldRef, newRef)
 	require.NoError(t, err, clues.ToCore(err))
 
-	return p
+	got := dm.GetNewRepoRef(oldRef)
+	require.NotNil(t, got)
+	assert.Equal(t, newRef.String(), got.String())
+
+	got = dm.GetNewRepoRef(newRef.ToBuilder())
+	assert.Nil(t, got)
+}
+
+type LocationPrefixMatcherUnitSuite struct {
+	tester.Suite
 }
 
 func TestLocationPrefixMatcherUnitSuite(t *testing.T) {
@@ -50,43 +75,52 @@ func (suite *LocationPrefixMatcherUnitSuite) TestAdd_Twice_Fails() {
 	t := suite.T()
 
 	p := makePath(
 		t,
-		service,
-		category,
-		testTenant,
-		testUser,
-		[]string{"folder1"})
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder1",
+		},
+		false).ToBuilder()
 
 	loc1 := path.Builder{}.Append("folder1")
 	loc2 := path.Builder{}.Append("folder2")
 
-	lpm := kopia.NewLocationPrefixMatcher()
+	lpm := newLocationPrefixMatcher()
 
-	err := lpm.Add(p, loc1)
+	err := lpm.add(p, loc1)
 	require.NoError(t, err, clues.ToCore(err))
 
-	err = lpm.Add(p, loc2)
+	err = lpm.add(p, loc2)
 	assert.Error(t, err, clues.ToCore(err))
 }
 
 func (suite *LocationPrefixMatcherUnitSuite) TestAdd_And_Match() {
 	p1 := makePath(
 		suite.T(),
-		service,
-		category,
-		testTenant,
-		testUser,
-		[]string{"folder1"})
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder1",
+		},
+		false)
+	loc1 := path.Builder{}.Append("folder1")
 
 	p1Parent, err := p1.Dir()
 	require.NoError(suite.T(), err, clues.ToCore(err))
 
 	p2 := makePath(
 		suite.T(),
-		service,
-		category,
-		testTenant,
-		testUser,
-		[]string{"folder2"})
-	loc1 := path.Builder{}.Append("folder1")
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder2",
		},
+		false)
 
 	table := []struct {
 		name string
@@ -134,14 +168,14 @@ func (suite *LocationPrefixMatcherUnitSuite) TestAdd_And_Match() {
 	for _, test := range table {
 		suite.Run(test.name, func() {
 			t := suite.T()
-			lpm := kopia.NewLocationPrefixMatcher()
+			lpm := newLocationPrefixMatcher()
 
 			for _, input := range test.inputs {
-				err := lpm.Add(input.repoRef, input.locRef)
+				err := lpm.add(input.repoRef.ToBuilder(), input.locRef)
 				require.NoError(t, err, clues.ToCore(err))
 			}
 
-			loc := lpm.LongestPrefix(test.searchKey)
+			loc := lpm.longestPrefix(test.searchKey)
 			test.check(t, loc)
 
 			if loc == nil {

View File

@@ -137,7 +137,7 @@ type corsoProgress struct {
 	deets *details.Builder
 	// toMerge represents items that we don't have in-memory item info for. The
 	// item info for these items should be sourced from a base snapshot later on.
-	toMerge    map[string]PrevRefs
+	toMerge    *mergeDetails
 	mu         sync.RWMutex
 	totalBytes int64
 	errs       *fault.Bus
@@ -195,9 +195,14 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 		cp.mu.Lock()
 		defer cp.mu.Unlock()
 
-		cp.toMerge[d.prevPath.ShortRef()] = PrevRefs{
-			Repo:     d.repoPath,
-			Location: d.locationPath,
+		err := cp.toMerge.addRepoRef(d.prevPath.ToBuilder(), d.repoPath)
+		if err != nil {
+			cp.errs.AddRecoverable(clues.Wrap(err, "adding item to merge list").
+				With(
+					"service", d.repoPath.Service().String(),
+					"category", d.repoPath.Category().String(),
+				).
+				Label(fault.LabelForceNoBackupCreation))
 		}
 
 		return
@@ -711,7 +716,8 @@ func getTreeNode(roots map[string]*treeMap, pathElements []string) *treeMap {
 func inflateCollectionTree(
 	ctx context.Context,
 	collections []data.BackupCollection,
-) (map[string]*treeMap, map[string]path.Path, *LocationPrefixMatcher, error) {
+	toMerge *mergeDetails,
+) (map[string]*treeMap, map[string]path.Path, error) {
 	roots := make(map[string]*treeMap)
 	// Contains the old path for collections that have been moved or renamed.
 	// Allows resolving what the new path should be when walking the base
@@ -720,28 +726,18 @@ func inflateCollectionTree(
 	// Temporary variable just to track the things that have been marked as
 	// changed while keeping a reference to their path.
 	changedPaths := []path.Path{}
-	// updatedLocations maps from the collections RepoRef to the updated location
-	// path for all moved collections. New collections aren't tracked because we
-	// will have their location explicitly. This is used by the backup details
-	// merge code to update locations for items in nested folders that got moved
-	// when the top-level folder got moved. The nested folder may not generate a
-	// delta result but will need the location updated.
-	//
-	// This could probably use a path.Builder as the value instead of a string if
-	// we wanted.
-	updatedLocations := NewLocationPrefixMatcher()
 
 	for _, s := range collections {
 		switch s.State() {
 		case data.DeletedState:
 			if s.PreviousPath() == nil {
-				return nil, nil, nil, clues.New("nil previous path on deleted collection")
+				return nil, nil, clues.New("nil previous path on deleted collection")
 			}
 
 			changedPaths = append(changedPaths, s.PreviousPath())
 
 			if _, ok := updatedPaths[s.PreviousPath().String()]; ok {
-				return nil, nil, nil, clues.New("multiple previous state changes to collection").
+				return nil, nil, clues.New("multiple previous state changes to collection").
 					With("collection_previous_path", s.PreviousPath())
 			}
@@ -753,7 +749,7 @@ func inflateCollectionTree(
 			changedPaths = append(changedPaths, s.PreviousPath())
 
 			if _, ok := updatedPaths[s.PreviousPath().String()]; ok {
-				return nil, nil, nil, clues.New("multiple previous state changes to collection").
+				return nil, nil, clues.New("multiple previous state changes to collection").
 					With("collection_previous_path", s.PreviousPath())
 			}
@@ -763,25 +759,25 @@ func inflateCollectionTree(
 		// TODO(ashmrtn): Get old location ref and add it to the prefix matcher.
 		lp, ok := s.(data.LocationPather)
 		if ok && s.PreviousPath() != nil {
-			if err := updatedLocations.Add(s.PreviousPath(), lp.LocationPath()); err != nil {
-				return nil, nil, nil, clues.Wrap(err, "building updated location set").
+			if err := toMerge.addLocation(s.PreviousPath().ToBuilder(), lp.LocationPath()); err != nil {
+				return nil, nil, clues.Wrap(err, "building updated location set").
 					With("collection_location", lp.LocationPath())
 			}
 		}
 
 		if s.FullPath() == nil || len(s.FullPath().Elements()) == 0 {
-			return nil, nil, nil, clues.New("no identifier for collection")
+			return nil, nil, clues.New("no identifier for collection")
 		}
 
 		node := getTreeNode(roots, s.FullPath().Elements())
 		if node == nil {
-			return nil, nil, nil, clues.New("getting tree node").With("collection_full_path", s.FullPath())
+			return nil, nil, clues.New("getting tree node").With("collection_full_path", s.FullPath())
 		}
 
 		// Make sure there's only a single collection adding items for any given
 		// path in the new hierarchy.
 		if node.collection != nil {
-			return nil, nil, nil, clues.New("multiple instances of collection").With("collection_full_path", s.FullPath())
+			return nil, nil, clues.New("multiple instances of collection").With("collection_full_path", s.FullPath())
 		}
 
 		node.collection = s
@@ -799,11 +795,11 @@ func inflateCollectionTree(
 		}
 
 		if node.collection != nil && node.collection.State() == data.NotMovedState {
-			return nil, nil, nil, clues.New("conflicting states for collection").With("changed_path", p)
+			return nil, nil, clues.New("conflicting states for collection").With("changed_path", p)
 		}
 	}
 
-	return roots, updatedPaths, updatedLocations, nil
+	return roots, updatedPaths, nil
 }
 
 // traverseBaseDir is an unoptimized function that reads items in a directory
@@ -1034,10 +1030,10 @@ func inflateDirTree(
 	collections []data.BackupCollection,
 	globalExcludeSet map[string]map[string]struct{},
 	progress *corsoProgress,
-) (fs.Directory, *LocationPrefixMatcher, error) {
-	roots, updatedPaths, updatedLocations, err := inflateCollectionTree(ctx, collections)
+) (fs.Directory, error) {
+	roots, updatedPaths, err := inflateCollectionTree(ctx, collections, progress.toMerge)
 	if err != nil {
-		return nil, nil, clues.Wrap(err, "inflating collection tree")
+		return nil, clues.Wrap(err, "inflating collection tree")
 	}
 
 	baseIDs := make([]manifest.ID, 0, len(baseSnaps))
@@ -1055,12 +1051,12 @@ func inflateDirTree(
 	for _, snap := range baseSnaps {
 		if err = inflateBaseTree(ctx, loader, snap, updatedPaths, roots); err != nil {
-			return nil, nil, clues.Wrap(err, "inflating base snapshot tree(s)")
+			return nil, clues.Wrap(err, "inflating base snapshot tree(s)")
 		}
 	}
 
 	if len(roots) > 1 {
-		return nil, nil, clues.New("multiple root directories")
+		return nil, clues.New("multiple root directories")
 	}
 
 	var res fs.Directory
@@ -1068,11 +1064,11 @@ func inflateDirTree(
 	for dirName, dir := range roots {
 		tmp, err := buildKopiaDirs(dirName, dir, globalExcludeSet, progress)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
 		res = tmp
 	}
 
-	return res, updatedLocations, nil
+	return res, nil
 }

View File

@@ -532,7 +532,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
 		UploadProgress: &snapshotfs.NullUploadProgress{},
 		deets:          bd,
 		pending:        map[string]*itemDetails{},
-		toMerge:        map[string]PrevRefs{},
+		toMerge:        newMergeDetails(),
 		errs:           fault.New(true),
 	}
@@ -542,7 +542,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
 	cp.FinishedFile(suite.targetFileName, nil)
 
-	assert.Empty(t, cp.toMerge)
+	assert.Equal(t, 0, cp.toMerge.ItemsToMerge())
 
 	// Gather information about the current state.
 	var (
@@ -587,6 +587,11 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
 }
 
 func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarchy() {
+	type expectedRef struct {
+		oldRef *path.Builder
+		newRef path.Path
+	}
+
 	t := suite.T()
 
 	prevPath := makePath(
@@ -595,10 +600,11 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarchy() {
 		true,
 	)
 
-	expectedToMerge := map[string]PrevRefs{
-		prevPath.ShortRef(): {
-			Repo:     suite.targetFilePath,
-			Location: suite.targetFileLoc,
+	// Location is sourced from collections now so we don't need to check it here.
+	expectedToMerge := []expectedRef{
+		{
+			oldRef: prevPath.ToBuilder(),
+			newRef: suite.targetFilePath,
 		},
 	}
@@ -608,7 +614,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarchy() {
 		UploadProgress: &snapshotfs.NullUploadProgress{},
 		deets:          db,
 		pending:        map[string]*itemDetails{},
-		toMerge:        map[string]PrevRefs{},
+		toMerge:        newMergeDetails(),
 		errs:           fault.New(true),
 	}
@@ -623,8 +629,16 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarchy() {
 	require.Len(t, cp.pending, 1)
 
 	cp.FinishedFile(suite.targetFileName, nil)
-	assert.Equal(t, expectedToMerge, cp.toMerge)
 	assert.Empty(t, cp.deets)
+
+	for _, expected := range expectedToMerge {
+		gotRef := cp.toMerge.GetNewRepoRef(expected.oldRef)
+		if !assert.NotNil(t, gotRef) {
+			continue
+		}
+
+		assert.Equal(t, expected.newRef.String(), gotRef.String())
+	}
 }
 
 func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() {
@@ -710,36 +724,41 @@ func (suite *HierarchyBuilderUnitSuite) TestPopulatesPrefixMatcher() {
 	cols := []data.BackupCollection{c1, c2, c3}
 
-	_, locPaths, err := inflateDirTree(ctx, nil, nil, cols, nil, nil)
+	cp := corsoProgress{
+		toMerge: newMergeDetails(),
+		errs:    fault.New(true),
+	}
+
+	_, err := inflateDirTree(ctx, nil, nil, cols, nil, &cp)
 	require.NoError(t, err)
 
 	table := []struct {
-		inputPath   string
+		inputPath   *path.Builder
 		check       require.ValueAssertionFunc
 		expectedLoc *path.Builder
 	}{
 		{
-			inputPath:   p1.String(),
+			inputPath:   p1.ToBuilder(),
 			check:       require.NotNil,
 			expectedLoc: path.Builder{}.Append(p1.Folders()...),
 		},
 		{
-			inputPath:   p3.String(),
+			inputPath:   p3.ToBuilder(),
 			check:       require.NotNil,
 			expectedLoc: path.Builder{}.Append(p2.Folders()...),
 		},
 		{
-			inputPath:   p4.String(),
+			inputPath:   p4.ToBuilder(),
 			check:       require.Nil,
 			expectedLoc: nil,
 		},
 	}
 
 	for _, test := range table {
-		suite.Run(test.inputPath, func() {
+		suite.Run(test.inputPath.String(), func() {
 			t := suite.T()
 
-			loc := locPaths.LongestPrefix(test.inputPath)
+			loc := cp.toMerge.GetNewLocation(test.inputPath)
 			test.check(t, loc)
 
 			if loc == nil {
@@ -776,6 +795,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
@@ -801,7 +821,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
 	// - emails
 	//   - Inbox
 	//     - 42 separate files
-	dirTree, _, err := inflateDirTree(ctx, nil, nil, collections, nil, progress)
+	dirTree, err := inflateDirTree(ctx, nil, nil, collections, nil, progress)
 	require.NoError(t, err, clues.ToCore(err))
 
 	assert.Equal(t, encodeAsPath(testTenant), dirTree.Name())
@@ -894,10 +914,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory() {
 			progress := &corsoProgress{
 				pending: map[string]*itemDetails{},
+				toMerge: newMergeDetails(),
 				errs:    fault.New(true),
 			}
 
-			dirTree, _, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
+			dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
 			require.NoError(t, err, clues.ToCore(err))
 
 			assert.Equal(t, encodeAsPath(testTenant), dirTree.Name())
@@ -998,7 +1019,12 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
 		suite.Run(test.name, func() {
 			t := suite.T()
 
-			_, _, err := inflateDirTree(ctx, nil, nil, test.layout, nil, nil)
+			progress := &corsoProgress{
+				toMerge: newMergeDetails(),
+				errs:    fault.New(true),
+			}
+
+			_, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
 			assert.Error(t, err, clues.ToCore(err))
 		})
 	}
@@ -1088,6 +1114,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
 			progress := &corsoProgress{
 				pending: map[string]*itemDetails{},
+				toMerge: newMergeDetails(),
 				errs:    fault.New(true),
 			}
@@ -1110,7 +1137,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
 				cols = append(cols, mc)
 			}
 
-			_, _, err := inflateDirTree(ctx, nil, nil, cols, nil, progress)
+			_, err := inflateDirTree(ctx, nil, nil, cols, nil, progress)
 			require.Error(t, err, clues.ToCore(err))
 		})
 	}
@@ -1379,13 +1406,14 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 
 	msw := &mockSnapshotWalker{
 		snapshotRoot: getBaseSnapshot(),
 	}
 
-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2158,13 +2186,14 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirectories() {
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 
 	msw := &mockSnapshotWalker{
 		snapshotRoot: getBaseSnapshot(),
 	}
 
-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2306,6 +2335,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtree() {
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 
 	mc := mockconnector.NewMockExchangeCollection(suite.testStoragePath, suite.testStoragePath, 1)
@@ -2327,7 +2357,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtree() {
 	//     - file3
 	//   - work
 	//     - file4
-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2407,6 +2437,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase() {
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 
 	mc := mockconnector.NewMockExchangeCollection(archiveStorePath, archiveLocPath, 1)
@@ -2431,7 +2462,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase() {
 	// - emails
 	//   - Archive
 	//     - file2
-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2662,6 +2693,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubtree() {
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
@@ -2680,7 +2712,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubtree() {
 	collections := []data.BackupCollection{mc}
 
-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{

View File

@@ -124,13 +124,6 @@ type IncrementalBase struct {
 	SubtreePaths []*path.Builder
 }
 
-// PrevRefs hold the repoRef and locationRef from the items
-// that need to be merged in from prior snapshots.
-type PrevRefs struct {
-	Repo     path.Path
-	Location *path.Builder
-}
-
 // ConsumeBackupCollections takes a set of collections and creates a kopia snapshot
 // with the data that they contain. previousSnapshots is used for incremental
 // backups and should represent the base snapshot from which metadata is sourced
@@ -145,22 +138,22 @@ func (w Wrapper) ConsumeBackupCollections(
 	tags map[string]string,
 	buildTreeWithBase bool,
 	errs *fault.Bus,
-) (*BackupStats, *details.Builder, map[string]PrevRefs, *LocationPrefixMatcher, error) {
+) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
 	if w.c == nil {
-		return nil, nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
+		return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
 	}
 
 	ctx, end := diagnostics.Span(ctx, "kopia:consumeBackupCollections")
 	defer end()
 
 	if len(collections) == 0 && len(globalExcludeSet) == 0 {
-		return &BackupStats{}, &details.Builder{}, nil, nil, nil
+		return &BackupStats{}, &details.Builder{}, nil, nil
 	}
 
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
 		deets:   &details.Builder{},
-		toMerge: map[string]PrevRefs{},
+		toMerge: newMergeDetails(),
 		errs:    errs,
 	}
@@ -172,7 +165,7 @@ func (w Wrapper) ConsumeBackupCollections(
 		base = previousSnapshots
 	}
 
-	dirTree, updatedLocations, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		w.c,
 		base,
@@ -180,7 +173,7 @@ func (w Wrapper) ConsumeBackupCollections(
 		globalExcludeSet,
 		progress)
 	if err != nil {
-		return nil, nil, nil, nil, clues.Wrap(err, "building kopia directories")
+		return nil, nil, nil, clues.Wrap(err, "building kopia directories")
 	}
 
 	s, err := w.makeSnapshotWithRoot(
@@ -190,10 +183,10 @@ func (w Wrapper) ConsumeBackupCollections(
 		tags,
 		progress)
 	if err != nil {
-		return nil, nil, nil, nil, err
+		return nil, nil, nil, err
 	}
 
-	return s, progress.deets, progress.toMerge, updatedLocations, progress.errs.Failure()
+	return s, progress.deets, progress.toMerge, progress.errs.Failure()
 }
 
 func (w Wrapper) makeSnapshotWithRoot(

View File

@@ -276,7 +276,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
 		suite.Run(test.name, func() {
 			t := suite.T()
 
-			stats, deets, _, _, err := suite.w.ConsumeBackupCollections(
+			stats, deets, _, err := suite.w.ConsumeBackupCollections(
 				suite.ctx,
 				prevSnaps,
 				collections,
@@ -423,7 +423,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 			t := suite.T()
 			collections := test.cols()
 
-			stats, deets, prevShortRefs, _, err := suite.w.ConsumeBackupCollections(
+			stats, deets, prevShortRefs, err := suite.w.ConsumeBackupCollections(
 				suite.ctx,
 				prevSnaps,
 				collections,
@@ -459,13 +459,9 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 				assert.False(t, onedrive.IsMetaFile(entry.RepoRef), "metadata entry in details")
 			}
 
-			assert.Len(t, prevShortRefs, 0)
-			for _, prevRef := range prevShortRefs {
-				assert.False(
-					t,
-					onedrive.IsMetaFile(prevRef.Repo.String()),
-					"metadata entry in base details")
-			}
+			// Shouldn't have any items to merge because the cached files are metadata
+			// files.
+			assert.Equal(t, 0, prevShortRefs.ItemsToMerge())
 
 			checkSnapshotTags(
 				t,
@@ -525,7 +521,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 	fp2, err := suite.storePath2.Append(dc2.Names[0], true)
 	require.NoError(t, err, clues.ToCore(err))
 
-	stats, _, _, _, err := w.ConsumeBackupCollections(
+	stats, _, _, err := w.ConsumeBackupCollections(
 		ctx,
 		nil,
 		[]data.BackupCollection{dc1, dc2},
@@ -644,7 +640,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 		},
 	}
 
-	stats, deets, _, _, err := suite.w.ConsumeBackupCollections(
+	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
 		nil,
 		collections,
@@ -706,7 +702,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() {
 			ctx, flush := tester.NewContext()
 			defer flush()
 
-			s, d, _, _, err := suite.w.ConsumeBackupCollections(
+			s, d, _, err := suite.w.ConsumeBackupCollections(
 				ctx,
 				nil,
 				test.collections,
@@ -866,7 +862,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 		tags[k] = ""
 	}
 
-	stats, deets, _, _, err := suite.w.ConsumeBackupCollections(
+	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
 		nil,
 		collections,
@@ -1018,7 +1014,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 		}
 	}
 
-	stats, _, _, _, err := suite.w.ConsumeBackupCollections(
+	stats, _, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
 		[]IncrementalBase{
 			{

View File

@@ -264,7 +264,7 @@ func (op *BackupOperation) do(
 	ctx = clues.Add(ctx, "coll_count", len(cs))
 
-	writeStats, deets, toMerge, updatedLocs, err := consumeBackupCollections(
+	writeStats, deets, toMerge, err := consumeBackupCollections(
 		ctx,
 		op.kopia,
 		op.account.ID(),
@@ -287,7 +287,6 @@ func (op *BackupOperation) do(
 		detailsStore,
 		mans,
 		toMerge,
-		updatedLocs,
 		deets,
 		op.Errors)
 	if err != nil {
@@ -412,7 +411,7 @@ func consumeBackupCollections(
 	backupID model.StableID,
 	isIncremental bool,
 	errs *fault.Bus,
-) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, *kopia.LocationPrefixMatcher, error) {
+) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error) {
 	complete, closer := observe.MessageWithCompletion(ctx, "Backing up data")
 	defer func() {
 		complete <- struct{}{}
@@ -441,7 +440,7 @@ func consumeBackupCollections(
 		for _, reason := range m.Reasons {
 			pb, err := builderFromReason(ctx, tenantID, reason)
 			if err != nil {
-				return nil, nil, nil, nil, clues.Wrap(err, "getting subtree paths for bases")
+				return nil, nil, nil, clues.Wrap(err, "getting subtree paths for bases")
 			}
 
 			paths = append(paths, pb)
@@ -477,7 +476,7 @@ func consumeBackupCollections(
 			"base_backup_id", mbID)
 	}
 
-	kopiaStats, deets, itemsSourcedFromBase, updatedLocs, err := bc.ConsumeBackupCollections(
+	kopiaStats, deets, itemsSourcedFromBase, err := bc.ConsumeBackupCollections(
 		ctx,
 		bases,
 		cs,
@@ -487,10 +486,10 @@ func consumeBackupCollections(
 		errs)
 	if err != nil {
 		if kopiaStats == nil {
-			return nil, nil, nil, nil, err
+			return nil, nil, nil, err
 		}
 
-		return nil, nil, nil, nil, clues.Stack(err).With(
+		return nil, nil, nil, clues.Stack(err).With(
 			"kopia_errors", kopiaStats.ErrorCount,
 			"kopia_ignored_errors", kopiaStats.IgnoredErrorCount)
 	}
@@ -502,7 +501,7 @@ func consumeBackupCollections(
 			"kopia_ignored_errors", kopiaStats.IgnoredErrorCount)
 	}
 
-	return kopiaStats, deets, itemsSourcedFromBase, updatedLocs, err
+	return kopiaStats, deets, itemsSourcedFromBase, err
 }
 
 func matchesReason(reasons []kopia.Reason, p path.Path) bool {
@@ -522,13 +521,12 @@ func mergeDetails(
 	ms *store.Wrapper,
 	detailsStore streamstore.Streamer,
 	mans []*kopia.ManifestEntry,
-	shortRefsFromPrevBackup map[string]kopia.PrevRefs,
-	updatedLocs *kopia.LocationPrefixMatcher,
+	dataFromBackup kopia.DetailsMergeInfoer,
 	deets *details.Builder,
 	errs *fault.Bus,
 ) error {
 	// Don't bother loading any of the base details if there's nothing we need to merge.
-	if len(shortRefsFromPrevBackup) == 0 {
+	if dataFromBackup.ItemsToMerge() == 0 {
 		return nil
 	}
@@ -581,17 +579,16 @@ func mergeDetails(
 				continue
 			}
 
-			prev, ok := shortRefsFromPrevBackup[rr.ShortRef()]
-			if !ok {
+			pb := rr.ToBuilder()
+
+			newPath := dataFromBackup.GetNewRepoRef(pb)
+			if newPath == nil {
 				// This entry was not sourced from a base snapshot or cached from a
 				// previous backup, skip it.
 				continue
 			}
 
-			newPath := prev.Repo
-			// Locations are done by collection RepoRef so remove the item from the
-			// input.
-			newLoc := updatedLocs.LongestPrefix(rr.ToBuilder().Dir().String())
+			newLoc := dataFromBackup.GetNewLocation(pb.Dir())
 
 			// Fixup paths in the item.
 			item := entry.ItemInfo
@@ -637,10 +634,12 @@ func mergeDetails(
 			"base_item_count_added", manifestAddedEntries)
 	}
 
-	if addedEntries != len(shortRefsFromPrevBackup) {
+	if addedEntries != dataFromBackup.ItemsToMerge() {
 		return clues.New("incomplete migration of backup details").
 			WithClues(ctx).
-			With("item_count", addedEntries, "expected_item_count", len(shortRefsFromPrevBackup))
+			With(
+				"item_count", addedEntries,
+				"expected_item_count", dataFromBackup.ItemsToMerge())
 	}
 
 	return nil
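
The merge loop in `mergeDetails` above boils down to: build the lookup key from each base entry's old RepoRef, skip entries with no mapping, and fetch the new location keyed by the entry's parent directory. A toy model of that control flow with string paths (hypothetical data, not from the repo):

```go
package main

import (
	"fmt"
	"path"
)

// mergeInfo is a stand-in for kopia.DetailsMergeInfoer with string paths.
type mergeInfo struct {
	repoRefs map[string]string // old item path -> new item path
	locs     map[string]string // old parent dir -> new location
}

func mergeEntries(baseEntries []string, info mergeInfo) []string {
	var merged []string

	for _, rr := range baseEntries {
		newPath, ok := info.repoRefs[rr]
		if !ok {
			// Not sourced from a base snapshot; skip it.
			continue
		}

		// Locations are keyed by the item's parent directory, mirroring
		// GetNewLocation(pb.Dir()) in the diff.
		newLoc := info.locs[path.Dir(rr)]

		merged = append(merged, fmt.Sprintf("%s (loc: %s)", newPath, newLoc))
	}

	return merged
}

func main() {
	info := mergeInfo{
		repoRefs: map[string]string{"t/u/folder1/item1": "t/u/folder2/item1"},
		locs:     map[string]string{"t/u/folder1": "Inbox/folder2"},
	}

	// item9 has no mapping, so only item1 is merged.
	fmt.Println(mergeEntries([]string{"t/u/folder1/item1", "t/u/folderX/item9"}, info))
}
```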

View File

@@ -102,12 +102,12 @@ func (mbu mockBackupConsumer) ConsumeBackupCollections(
 	tags map[string]string,
 	buildTreeWithBase bool,
 	errs *fault.Bus,
-) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, *kopia.LocationPrefixMatcher, error) {
+) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error) {
 	if mbu.checkFunc != nil {
 		mbu.checkFunc(bases, cs, tags, buildTreeWithBase)
 	}
 
-	return &kopia.BackupStats{}, &details.Builder{}, nil, nil, nil
+	return &kopia.BackupStats{}, &details.Builder{}, nil, nil
 }
 
 // ----- model store for backups
@@ -181,6 +181,47 @@ func (mbs mockBackupStorer) Update(context.Context, model.Schema, model.Model) error {
 	return clues.New("not implemented")
 }
 
+// ----- details merge infoer
+
+type mockDetailsMergeInfoer struct {
+	repoRefs map[string]path.Path
+	locs     map[string]*path.Builder
+}
+
+func (m *mockDetailsMergeInfoer) add(oldRef, newRef path.Path, newLoc *path.Builder) {
+	oldPB := oldRef.ToBuilder()
+	// Items are indexed individually.
+	m.repoRefs[oldPB.ShortRef()] = newRef
+
+	if newLoc != nil {
+		// Locations are indexed by directory.
+		m.locs[oldPB.Dir().ShortRef()] = newLoc
+	}
+}
+
+func (m *mockDetailsMergeInfoer) GetNewRepoRef(oldRef *path.Builder) path.Path {
+	return m.repoRefs[oldRef.ShortRef()]
+}
+
+func (m *mockDetailsMergeInfoer) GetNewLocation(oldRef *path.Builder) *path.Builder {
+	return m.locs[oldRef.ShortRef()]
+}
+
+func (m *mockDetailsMergeInfoer) ItemsToMerge() int {
+	if m == nil {
+		return 0
+	}
+
+	return len(m.repoRefs)
+}
+
+func newMockDetailsMergeInfoer() *mockDetailsMergeInfoer {
+	return &mockDetailsMergeInfoer{
+		repoRefs: map[string]path.Path{},
+		locs:     map[string]*path.Builder{},
+	}
+}
+
 // ---------------------------------------------------------------------------
 // helper funcs
 // ---------------------------------------------------------------------------
@@ -669,12 +710,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 	require.NoError(suite.T(), err, clues.ToCore(err))
 
 	table := []struct {
 		name             string
 		populatedModels  map[model.StableID]backup.Backup
 		populatedDetails map[string]*details.Details
 		inputMans        []*kopia.ManifestEntry
-		inputShortRefsFromPrevBackup map[string]kopia.PrevRefs
-		prefixMatcher                *kopia.LocationPrefixMatcher
+		mdm              *mockDetailsMergeInfoer
 
 		errCheck        assert.ErrorAssertionFunc
 		expectedEntries []*details.DetailsEntry
@@ -686,30 +726,19 @@
 			expectedEntries: []*details.DetailsEntry{},
 		},
 		{
 			name:     "EmptyShortRefsFromPrevBackup",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{},
+			mdm:      newMockDetailsMergeInfoer(),
 			errCheck: assert.NoError,
 			// Use empty slice so we don't error out on nil != empty.
 			expectedEntries: []*details.DetailsEntry{},
 		},
 		{
 			name: "BackupIDNotFound",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -723,22 +752,11 @@
 		},
 		{
 			name: "DetailsIDNotFound",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -760,32 +778,12 @@
 		},
 		{
 			name: "BaseMissingItems",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-				itemPath2.ShortRef(): {
-					Repo:     itemPath2,
-					Location: locationPath2,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				rr, err = itemPath2.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath2)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+				res.add(itemPath2, itemPath2, locationPath2)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -811,22 +809,11 @@
 		},
 		{
 			name: "TooManyItems",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -858,22 +845,11 @@
 		},
 		{
 			name: "BadBaseRepoRef",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath2,
-					Location: locationPath2,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath2)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath2, locationPath2)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -918,25 +894,24 @@
 		},
 		{
 			name: "BadOneDrivePath",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo: makePath(
-						suite.T(),
-						[]string{
-							itemPath1.Tenant(),
-							path.OneDriveService.String(),
-							itemPath1.ResourceOwner(),
-							path.FilesCategory.String(),
-							"personal",
-							"item1",
-						},
-						true,
-					),
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				p := makePath(
+					suite.T(),
+					[]string{
+						itemPath1.Tenant(),
+						path.OneDriveService.String(),
+						itemPath1.ResourceOwner(),
+						path.FilesCategory.String(),
+						"personal",
+						"item1",
+					},
+					true,
+				)
+
+				res.add(itemPath1, p, nil)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -962,22 +937,11 @@
 		},
 		{
 			name: "ItemMerged",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1006,14 +970,11 @@
 		},
 		{
 			name: "ItemMergedNoLocation",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo: itemPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, nil)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1042,22 +1003,11 @@
 		},
 		{
 			name: "ItemMergedSameLocation",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1086,22 +1036,11 @@
 		},
 		{
 			name: "ItemMergedExtraItemsInBase",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1131,22 +1070,11 @@
 		},
 		{
 			name: "ItemMoved",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath2,
-					Location: locationPath2,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath2)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath2, locationPath2)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1175,32 +1103,12 @@
 		},
 		{
 			name: "MultipleBases",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-				itemPath3.ShortRef(): {
-					Repo:     itemPath3,
-					Location: locationPath3,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				rr, err = itemPath3.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath3)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+				res.add(itemPath3, itemPath3, locationPath3)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1247,22 +1155,11 @@
 		},
 		{
 			name: "SomeBasesIncomplete",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-
-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))
-
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1322,8 +1219,7 @@
 				w,
 				mds,
 				test.inputMans,
-				test.inputShortRefsFromPrevBackup,
-				test.prefixMatcher,
+				test.mdm,
 				&deets,
 				fault.New(true))
 			test.errCheck(t, err, clues.ToCore(err))
@@ -1373,13 +1269,6 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolders
 			Category: itemPath1.Category(),
 		}
 
-		inputToMerge = map[string]kopia.PrevRefs{
-			itemPath1.ShortRef(): {
-				Repo:     itemPath1,
-				Location: locPath1,
-			},
-		}
-
 		inputMans = []*kopia.ManifestEntry{
 			{
 				Manifest: makeManifest(t, backup1.ID, ""),
@@ -1398,12 +1287,8 @@
 		// later = now.Add(42 * time.Minute)
 	)
 
-	itemDir, err := itemPath1.Dir()
-	require.NoError(t, err, clues.ToCore(err))
-
-	prefixMatcher := kopia.NewLocationPrefixMatcher()
-	err = prefixMatcher.Add(itemDir, locPath1)
-	require.NoError(suite.T(), err, clues.ToCore(err))
+	mdm := newMockDetailsMergeInfoer()
+	mdm.add(itemPath1, itemPath1, locPath1)
 
 	itemDetails := makeDetailsEntry(t, itemPath1, locPath1, itemSize, false)
 	// itemDetails.Exchange.Modified = now
@@ -1438,13 +1323,12 @@
 		deets = details.Builder{}
 	)
 
-	err = mergeDetails(
+	err := mergeDetails(
 		ctx,
 		w,
 		mds,
 		inputMans,
-		inputToMerge,
-		prefixMatcher,
+		mdm,
 		&deets,
 		fault.New(true))
 	assert.NoError(t, err, clues.ToCore(err))

View File

@@ -37,7 +37,7 @@ type (
 			tags map[string]string,
 			buildTreeWithBase bool,
 			errs *fault.Bus,
-		) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, *kopia.LocationPrefixMatcher, error)
+		) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error)
 	}
 
 	RestoreProducer interface {

View File

@@ -228,7 +228,7 @@ func write(
 	dbcs []data.BackupCollection,
 	errs *fault.Bus,
 ) (string, error) {
-	backupStats, _, _, _, err := bup.ConsumeBackupCollections(
+	backupStats, _, _, err := bup.ConsumeBackupCollections(
 		ctx,
 		nil,
 		dbcs,