Combine backup details merging structs (#3056)
Combine the sets of information used for merging backup details so there are fewer things to pass around and so we can hide the function used to generate lookup keys.

This changes the prefix matcher to act like a regular map for the moment (exact match), though the OneDrive LocationRef PRs will update that.

---

#### Does this PR need a docs update or release note?

- [ ] ✅ Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] ⛔ No

#### Type of change

- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* #2486

#### Test Plan

- [x] 💪 Manual
- [x] ⚡ Unit test
- [ ] 💚 E2E
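For readers skimming the diff below, here is a minimal sketch of how the consolidated merge info is populated and queried. It is illustrative only: it assumes code running inside the `kopia` package (the concrete `mergeDetails` type is unexported), and the `example` function plus all tenant/user/folder values are hypothetical, modeled on the test helpers in this diff.

```go
package kopia

import (
	"github.com/alcionai/corso/src/pkg/path"
)

// example is a hypothetical helper, not part of this PR.
func example() error {
	dm := newMergeDetails()

	// Old and new RepoRefs for an item that moved from folder1 to folder2.
	oldRef, err := path.Build(
		"a-tenant", "a-user", path.ExchangeService, path.EmailCategory, true, "folder1", "item1")
	if err != nil {
		return err
	}

	newRef, err := path.Build(
		"a-tenant", "a-user", path.ExchangeService, path.EmailCategory, true, "folder2", "item1")
	if err != nil {
		return err
	}

	// Items are keyed by the old RepoRef; duplicate adds return an error.
	if err := dm.addRepoRef(oldRef.ToBuilder(), newRef); err != nil {
		return err
	}

	// Folder locations are tracked separately, keyed by the old folder path.
	if err := dm.addLocation(oldRef.ToBuilder().Dir(), path.Builder{}.Append("folder2")); err != nil {
		return err
	}

	// Consumers outside the package see only the read-only interface.
	var dmi DetailsMergeInfoer = dm
	_ = dmi.ItemsToMerge()                           // 1
	_ = dmi.GetNewRepoRef(oldRef.ToBuilder())        // newRef
	_ = dmi.GetNewLocation(oldRef.ToBuilder().Dir()) // updated folder location, or nil
	return nil
}
```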
This commit is contained in:
parent 8867b63ccb
commit 52e627189a
@@ -7,11 +7,66 @@ import (
 	"github.com/alcionai/corso/src/pkg/path"
 )

-type LocationPrefixMatcher struct {
+type DetailsMergeInfoer interface {
+	// Count returns the number of items that need to be merged.
+	ItemsToMerge() int
+	// GetNewRepoRef takes the path of the old location of the item and returns
+	// its new RepoRef if the item needs merged. If the item doesn't need merged
+	// returns nil.
+	GetNewRepoRef(oldRef *path.Builder) path.Path
+	// GetNewLocation takes the path of the folder containing the item and returns
+	// the location of the folder containing the item if it was updated. Otherwise
+	// returns nil.
+	GetNewLocation(oldRef *path.Builder) *path.Builder
+}
+
+type mergeDetails struct {
+	repoRefs  map[string]path.Path
+	locations *locationPrefixMatcher
+}
+
+func (m *mergeDetails) ItemsToMerge() int {
+	if m == nil {
+		return 0
+	}
+
+	return len(m.repoRefs)
+}
+
+func (m *mergeDetails) addRepoRef(oldRef *path.Builder, newRef path.Path) error {
+	if _, ok := m.repoRefs[oldRef.ShortRef()]; ok {
+		return clues.New("duplicate RepoRef").With("repo_ref", oldRef.String())
+	}
+
+	m.repoRefs[oldRef.ShortRef()] = newRef
+
+	return nil
+}
+
+func (m *mergeDetails) GetNewRepoRef(oldRef *path.Builder) path.Path {
+	return m.repoRefs[oldRef.ShortRef()]
+}
+
+func (m *mergeDetails) addLocation(oldRef, newLoc *path.Builder) error {
+	return m.locations.add(oldRef, newLoc)
+}
+
+func (m *mergeDetails) GetNewLocation(oldRef *path.Builder) *path.Builder {
+	return m.locations.longestPrefix(oldRef.String())
+}
+
+func newMergeDetails() *mergeDetails {
+	return &mergeDetails{
+		repoRefs:  map[string]path.Path{},
+		locations: newLocationPrefixMatcher(),
+	}
+}
+
+type locationPrefixMatcher struct {
 	m prefixmatcher.Matcher[*path.Builder]
 }

-func (m *LocationPrefixMatcher) Add(oldRef path.Path, newLoc *path.Builder) error {
+func (m *locationPrefixMatcher) add(oldRef, newLoc *path.Builder) error {
 	if _, ok := m.m.Get(oldRef.String()); ok {
 		return clues.New("RepoRef already in matcher").With("repo_ref", oldRef)
 	}
@@ -21,7 +76,7 @@ func (m *LocationPrefixMatcher) Add(oldRef path.Path, newLoc *path.Builder) erro
 	return nil
 }

-func (m *LocationPrefixMatcher) LongestPrefix(oldRef string) *path.Builder {
+func (m *locationPrefixMatcher) longestPrefix(oldRef string) *path.Builder {
 	if m == nil {
 		return nil
 	}
@@ -36,6 +91,6 @@ func (m *LocationPrefixMatcher) LongestPrefix(oldRef string) *path.Builder {
 	return v
 }

-func NewLocationPrefixMatcher() *LocationPrefixMatcher {
-	return &LocationPrefixMatcher{m: prefixmatcher.NewMatcher[*path.Builder]()}
+func newLocationPrefixMatcher() *locationPrefixMatcher {
+	return &locationPrefixMatcher{m: prefixmatcher.NewMatcher[*path.Builder]()}
 }
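As the PR description notes, the renamed `locationPrefixMatcher` currently behaves like a plain exact-match map rather than a true prefix matcher. A hedged sketch of the consequence, with hypothetical path values and assuming code inside the `kopia` package (the matcher's internals are partly elided from the hunk above):

```go
lpm := newLocationPrefixMatcher()

oldRef := path.Builder{}.Append("a-tenant", "exchange", "a-user", "email", "folder1")
if err := lpm.add(oldRef, path.Builder{}.Append("folder2")); err != nil {
	// duplicate key: adding the same oldRef twice is rejected
}

_ = lpm.longestPrefix(oldRef.String())                 // exact key: returns the stored location
_ = lpm.longestPrefix(oldRef.Append("sub").String())   // child path: nil for now (no prefix fallback)
```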
@@ -1,4 +1,4 @@
-package kopia_test
+package kopia

 import (
 	"testing"
@@ -8,33 +8,58 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

-	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/path"
 )

 var (
 	testTenant = "a-tenant"
 	testUser   = "a-user"
 	service    = path.ExchangeService
 	category   = path.EmailCategory
 )

-type LocationPrefixMatcherUnitSuite struct {
+type DetailsMergeInfoerUnitSuite struct {
 	tester.Suite
 }

-func makePath(
-	t *testing.T,
-	service path.ServiceType,
-	category path.CategoryType,
-	tenant, user string,
-	folders []string,
-) path.Path {
-	p, err := path.Build(tenant, user, service, category, false, folders...)
+func TestDetailsMergeInfoerUnitSuite(t *testing.T) {
+	suite.Run(t, &DetailsMergeInfoerUnitSuite{Suite: tester.NewUnitSuite(t)})
 }

+// TestRepoRefs is a basic sanity test to ensure lookups are working properly
+// for stored RepoRefs.
+func (suite *DetailsMergeInfoerUnitSuite) TestRepoRefs() {
+	t := suite.T()
+	oldRef := makePath(
+		t,
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder1",
+		},
+		false).ToBuilder()
+	newRef := makePath(
+		t,
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder2",
+		},
+		false)

+	dm := newMergeDetails()

+	err := dm.addRepoRef(oldRef, newRef)
 	require.NoError(t, err, clues.ToCore(err))

-	return p
+	got := dm.GetNewRepoRef(oldRef)
+	require.NotNil(t, got)
+	assert.Equal(t, newRef.String(), got.String())

+	got = dm.GetNewRepoRef(newRef.ToBuilder())
+	assert.Nil(t, got)
+}

 type LocationPrefixMatcherUnitSuite struct {
 	tester.Suite
 }

 func TestLocationPrefixMatcherUnitSuite(t *testing.T) {
@@ -50,43 +75,52 @@ func (suite *LocationPrefixMatcherUnitSuite) TestAdd_Twice_Fails() {
 	t := suite.T()
 	p := makePath(
 		t,
-		service,
-		category,
-		testTenant,
-		testUser,
-		[]string{"folder1"})
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder1",
+		},
+		false).ToBuilder()
 	loc1 := path.Builder{}.Append("folder1")
 	loc2 := path.Builder{}.Append("folder2")

-	lpm := kopia.NewLocationPrefixMatcher()
+	lpm := newLocationPrefixMatcher()

-	err := lpm.Add(p, loc1)
+	err := lpm.add(p, loc1)
 	require.NoError(t, err, clues.ToCore(err))

-	err = lpm.Add(p, loc2)
+	err = lpm.add(p, loc2)
 	assert.Error(t, err, clues.ToCore(err))
 }

 func (suite *LocationPrefixMatcherUnitSuite) TestAdd_And_Match() {
 	p1 := makePath(
 		suite.T(),
-		service,
-		category,
-		testTenant,
-		testUser,
-		[]string{"folder1"})
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder1",
+		},
+		false)

+	loc1 := path.Builder{}.Append("folder1")
 	p1Parent, err := p1.Dir()
 	require.NoError(suite.T(), err, clues.ToCore(err))

 	p2 := makePath(
 		suite.T(),
-		service,
-		category,
-		testTenant,
-		testUser,
-		[]string{"folder2"})
-	loc1 := path.Builder{}.Append("folder1")
+		[]string{
+			testTenant,
+			service,
+			testUser,
+			category,
+			"folder2",
+		},
+		false)

 	table := []struct {
 		name string
@@ -134,14 +168,14 @@ func (suite *LocationPrefixMatcherUnitSuite) TestAdd_And_Match() {
 	for _, test := range table {
 		suite.Run(test.name, func() {
 			t := suite.T()
-			lpm := kopia.NewLocationPrefixMatcher()
+			lpm := newLocationPrefixMatcher()

 			for _, input := range test.inputs {
-				err := lpm.Add(input.repoRef, input.locRef)
+				err := lpm.add(input.repoRef.ToBuilder(), input.locRef)
 				require.NoError(t, err, clues.ToCore(err))
 			}

-			loc := lpm.LongestPrefix(test.searchKey)
+			loc := lpm.longestPrefix(test.searchKey)
 			test.check(t, loc)

 			if loc == nil {
@@ -137,7 +137,7 @@ type corsoProgress struct {
 	deets *details.Builder
 	// toMerge represents items that we don't have in-memory item info for. The
 	// item info for these items should be sourced from a base snapshot later on.
-	toMerge map[string]PrevRefs
+	toMerge    *mergeDetails
 	mu         sync.RWMutex
 	totalBytes int64
 	errs       *fault.Bus
@@ -195,9 +195,14 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 	cp.mu.Lock()
 	defer cp.mu.Unlock()

-	cp.toMerge[d.prevPath.ShortRef()] = PrevRefs{
-		Repo:     d.repoPath,
-		Location: d.locationPath,
+	err := cp.toMerge.addRepoRef(d.prevPath.ToBuilder(), d.repoPath)
+	if err != nil {
+		cp.errs.AddRecoverable(clues.Wrap(err, "adding item to merge list").
+			With(
+				"service", d.repoPath.Service().String(),
+				"category", d.repoPath.Category().String(),
+			).
+			Label(fault.LabelForceNoBackupCreation))
 	}

 	return
@@ -711,7 +716,8 @@ func getTreeNode(roots map[string]*treeMap, pathElements []string) *treeMap {
 func inflateCollectionTree(
 	ctx context.Context,
 	collections []data.BackupCollection,
-) (map[string]*treeMap, map[string]path.Path, *LocationPrefixMatcher, error) {
+	toMerge *mergeDetails,
+) (map[string]*treeMap, map[string]path.Path, error) {
 	roots := make(map[string]*treeMap)
 	// Contains the old path for collections that have been moved or renamed.
 	// Allows resolving what the new path should be when walking the base
@@ -720,28 +726,18 @@ func inflateCollectionTree(
 	// Temporary variable just to track the things that have been marked as
 	// changed while keeping a reference to their path.
 	changedPaths := []path.Path{}
-	// updatedLocations maps from the collections RepoRef to the updated location
-	// path for all moved collections. New collections aren't tracked because we
-	// will have their location explicitly. This is used by the backup details
-	// merge code to update locations for items in nested folders that got moved
-	// when the top-level folder got moved. The nested folder may not generate a
-	// delta result but will need the location updated.
-	//
-	// This could probably use a path.Builder as the value instead of a string if
-	// we wanted.
-	updatedLocations := NewLocationPrefixMatcher()

 	for _, s := range collections {
 		switch s.State() {
 		case data.DeletedState:
 			if s.PreviousPath() == nil {
-				return nil, nil, nil, clues.New("nil previous path on deleted collection")
+				return nil, nil, clues.New("nil previous path on deleted collection")
 			}

 			changedPaths = append(changedPaths, s.PreviousPath())

 			if _, ok := updatedPaths[s.PreviousPath().String()]; ok {
-				return nil, nil, nil, clues.New("multiple previous state changes to collection").
+				return nil, nil, clues.New("multiple previous state changes to collection").
 					With("collection_previous_path", s.PreviousPath())
 			}

@@ -753,7 +749,7 @@ func inflateCollectionTree(
 			changedPaths = append(changedPaths, s.PreviousPath())

 			if _, ok := updatedPaths[s.PreviousPath().String()]; ok {
-				return nil, nil, nil, clues.New("multiple previous state changes to collection").
+				return nil, nil, clues.New("multiple previous state changes to collection").
 					With("collection_previous_path", s.PreviousPath())
 			}

@@ -763,25 +759,25 @@ func inflateCollectionTree(
 		// TODO(ashmrtn): Get old location ref and add it to the prefix matcher.
 		lp, ok := s.(data.LocationPather)
 		if ok && s.PreviousPath() != nil {
-			if err := updatedLocations.Add(s.PreviousPath(), lp.LocationPath()); err != nil {
-				return nil, nil, nil, clues.Wrap(err, "building updated location set").
+			if err := toMerge.addLocation(s.PreviousPath().ToBuilder(), lp.LocationPath()); err != nil {
+				return nil, nil, clues.Wrap(err, "building updated location set").
 					With("collection_location", lp.LocationPath())
 			}
 		}

 		if s.FullPath() == nil || len(s.FullPath().Elements()) == 0 {
-			return nil, nil, nil, clues.New("no identifier for collection")
+			return nil, nil, clues.New("no identifier for collection")
 		}

 		node := getTreeNode(roots, s.FullPath().Elements())
 		if node == nil {
-			return nil, nil, nil, clues.New("getting tree node").With("collection_full_path", s.FullPath())
+			return nil, nil, clues.New("getting tree node").With("collection_full_path", s.FullPath())
 		}

 		// Make sure there's only a single collection adding items for any given
 		// path in the new hierarchy.
 		if node.collection != nil {
-			return nil, nil, nil, clues.New("multiple instances of collection").With("collection_full_path", s.FullPath())
+			return nil, nil, clues.New("multiple instances of collection").With("collection_full_path", s.FullPath())
 		}

 		node.collection = s
@@ -799,11 +795,11 @@ func inflateCollectionTree(
 		}

 		if node.collection != nil && node.collection.State() == data.NotMovedState {
-			return nil, nil, nil, clues.New("conflicting states for collection").With("changed_path", p)
+			return nil, nil, clues.New("conflicting states for collection").With("changed_path", p)
 		}
 	}

-	return roots, updatedPaths, updatedLocations, nil
+	return roots, updatedPaths, nil
 }

 // traverseBaseDir is an unoptimized function that reads items in a directory
@@ -1034,10 +1030,10 @@ func inflateDirTree(
 	collections []data.BackupCollection,
 	globalExcludeSet map[string]map[string]struct{},
 	progress *corsoProgress,
-) (fs.Directory, *LocationPrefixMatcher, error) {
-	roots, updatedPaths, updatedLocations, err := inflateCollectionTree(ctx, collections)
+) (fs.Directory, error) {
+	roots, updatedPaths, err := inflateCollectionTree(ctx, collections, progress.toMerge)
 	if err != nil {
-		return nil, nil, clues.Wrap(err, "inflating collection tree")
+		return nil, clues.Wrap(err, "inflating collection tree")
 	}

 	baseIDs := make([]manifest.ID, 0, len(baseSnaps))
@@ -1055,12 +1051,12 @@ func inflateDirTree(

 	for _, snap := range baseSnaps {
 		if err = inflateBaseTree(ctx, loader, snap, updatedPaths, roots); err != nil {
-			return nil, nil, clues.Wrap(err, "inflating base snapshot tree(s)")
+			return nil, clues.Wrap(err, "inflating base snapshot tree(s)")
 		}
 	}

 	if len(roots) > 1 {
-		return nil, nil, clues.New("multiple root directories")
+		return nil, clues.New("multiple root directories")
 	}

 	var res fs.Directory
@@ -1068,11 +1064,11 @@ func inflateDirTree(
 	for dirName, dir := range roots {
 		tmp, err := buildKopiaDirs(dirName, dir, globalExcludeSet, progress)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}

 		res = tmp
 	}

-	return res, updatedLocations, nil
+	return res, nil
 }
@@ -532,7 +532,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
 		UploadProgress: &snapshotfs.NullUploadProgress{},
 		deets:          bd,
 		pending:        map[string]*itemDetails{},
-		toMerge:        map[string]PrevRefs{},
+		toMerge:        newMergeDetails(),
 		errs:           fault.New(true),
 	}

@@ -542,7 +542,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {

 	cp.FinishedFile(suite.targetFileName, nil)

-	assert.Empty(t, cp.toMerge)
+	assert.Equal(t, 0, cp.toMerge.ItemsToMerge())

 	// Gather information about the current state.
 	var (
@@ -587,6 +587,11 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
 }

 func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarchy() {
+	type expectedRef struct {
+		oldRef *path.Builder
+		newRef path.Path
+	}
+
 	t := suite.T()

 	prevPath := makePath(
@@ -595,10 +600,11 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
 		true,
 	)

-	expectedToMerge := map[string]PrevRefs{
-		prevPath.ShortRef(): {
-			Repo:     suite.targetFilePath,
-			Location: suite.targetFileLoc,
+	// Location is sourced from collections now so we don't need to check it here.
+	expectedToMerge := []expectedRef{
+		{
+			oldRef: prevPath.ToBuilder(),
+			newRef: suite.targetFilePath,
 		},
 	}

@@ -608,7 +614,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
 		UploadProgress: &snapshotfs.NullUploadProgress{},
 		deets:          db,
 		pending:        map[string]*itemDetails{},
-		toMerge:        map[string]PrevRefs{},
+		toMerge:        newMergeDetails(),
 		errs:           fault.New(true),
 	}

@@ -623,8 +629,16 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
 	require.Len(t, cp.pending, 1)

 	cp.FinishedFile(suite.targetFileName, nil)
-	assert.Equal(t, expectedToMerge, cp.toMerge)
 	assert.Empty(t, cp.deets)

+	for _, expected := range expectedToMerge {
+		gotRef := cp.toMerge.GetNewRepoRef(expected.oldRef)
+		if !assert.NotNil(t, gotRef) {
+			continue
+		}

+		assert.Equal(t, expected.newRef.String(), gotRef.String())
+	}
 }

 func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() {
@@ -710,36 +724,41 @@ func (suite *HierarchyBuilderUnitSuite) TestPopulatesPrefixMatcher() {

 	cols := []data.BackupCollection{c1, c2, c3}

-	_, locPaths, err := inflateDirTree(ctx, nil, nil, cols, nil, nil)
+	cp := corsoProgress{
+		toMerge: newMergeDetails(),
+		errs:    fault.New(true),
+	}

+	_, err := inflateDirTree(ctx, nil, nil, cols, nil, &cp)
 	require.NoError(t, err)

 	table := []struct {
-		inputPath   string
+		inputPath   *path.Builder
 		check       require.ValueAssertionFunc
 		expectedLoc *path.Builder
 	}{
 		{
-			inputPath:   p1.String(),
+			inputPath:   p1.ToBuilder(),
 			check:       require.NotNil,
 			expectedLoc: path.Builder{}.Append(p1.Folders()...),
 		},
 		{
-			inputPath:   p3.String(),
+			inputPath:   p3.ToBuilder(),
 			check:       require.NotNil,
 			expectedLoc: path.Builder{}.Append(p2.Folders()...),
 		},
 		{
-			inputPath:   p4.String(),
+			inputPath:   p4.ToBuilder(),
 			check:       require.Nil,
 			expectedLoc: nil,
 		},
 	}

 	for _, test := range table {
-		suite.Run(test.inputPath, func() {
+		suite.Run(test.inputPath.String(), func() {
 			t := suite.T()

-			loc := locPaths.LongestPrefix(test.inputPath)
+			loc := cp.toMerge.GetNewLocation(test.inputPath)
 			test.check(t, loc)

 			if loc == nil {
@@ -776,6 +795,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}

@@ -801,7 +821,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
 	// - emails
 	//   - Inbox
 	//     - 42 separate files
-	dirTree, _, err := inflateDirTree(ctx, nil, nil, collections, nil, progress)
+	dirTree, err := inflateDirTree(ctx, nil, nil, collections, nil, progress)
 	require.NoError(t, err, clues.ToCore(err))

 	assert.Equal(t, encodeAsPath(testTenant), dirTree.Name())
@@ -894,10 +914,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}

-	dirTree, _, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
+	dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
 	require.NoError(t, err, clues.ToCore(err))

 	assert.Equal(t, encodeAsPath(testTenant), dirTree.Name())
@@ -998,7 +1019,12 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
 		suite.Run(test.name, func() {
 			t := suite.T()

-			_, _, err := inflateDirTree(ctx, nil, nil, test.layout, nil, nil)
+			progress := &corsoProgress{
+				toMerge: newMergeDetails(),
+				errs:    fault.New(true),
+			}

+			_, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
 			assert.Error(t, err, clues.ToCore(err))
 		})
 	}
@@ -1088,6 +1114,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}

@@ -1110,7 +1137,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
 		cols = append(cols, mc)
 	}

-	_, _, err := inflateDirTree(ctx, nil, nil, cols, nil, progress)
+	_, err := inflateDirTree(ctx, nil, nil, cols, nil, progress)
 	require.Error(t, err, clues.ToCore(err))
 	})
 }
@@ -1379,13 +1406,14 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 	msw := &mockSnapshotWalker{
 		snapshotRoot: getBaseSnapshot(),
 	}

-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2158,13 +2186,14 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 	msw := &mockSnapshotWalker{
 		snapshotRoot: getBaseSnapshot(),
 	}

-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2306,6 +2335,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 	mc := mockconnector.NewMockExchangeCollection(suite.testStoragePath, suite.testStoragePath, 1)
@@ -2327,7 +2357,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
 	//     - file3
 	//   - work
 	//     - file4
-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2407,6 +2437,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase()

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}
 	mc := mockconnector.NewMockExchangeCollection(archiveStorePath, archiveLocPath, 1)
@@ -2431,7 +2462,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_HandleEmptyBase()
 	// - emails
 	//   - Archive
 	//     - file2
-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -2662,6 +2693,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
+		toMerge: newMergeDetails(),
 		errs:    fault.New(true),
 	}

@@ -2680,7 +2712,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt

 	collections := []data.BackupCollection{mc}

-	dirTree, _, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		msw,
 		[]IncrementalBase{
@@ -124,13 +124,6 @@ type IncrementalBase struct {
 	SubtreePaths []*path.Builder
 }

-// PrevRefs hold the repoRef and locationRef from the items
-// that need to be merged in from prior snapshots.
-type PrevRefs struct {
-	Repo     path.Path
-	Location *path.Builder
-}
-
 // ConsumeBackupCollections takes a set of collections and creates a kopia snapshot
 // with the data that they contain. previousSnapshots is used for incremental
 // backups and should represent the base snapshot from which metadata is sourced
@@ -145,22 +138,22 @@ func (w Wrapper) ConsumeBackupCollections(
 	tags map[string]string,
 	buildTreeWithBase bool,
 	errs *fault.Bus,
-) (*BackupStats, *details.Builder, map[string]PrevRefs, *LocationPrefixMatcher, error) {
+) (*BackupStats, *details.Builder, DetailsMergeInfoer, error) {
 	if w.c == nil {
-		return nil, nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
+		return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
 	}

 	ctx, end := diagnostics.Span(ctx, "kopia:consumeBackupCollections")
 	defer end()

 	if len(collections) == 0 && len(globalExcludeSet) == 0 {
-		return &BackupStats{}, &details.Builder{}, nil, nil, nil
+		return &BackupStats{}, &details.Builder{}, nil, nil
 	}

 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
 		deets:   &details.Builder{},
-		toMerge: map[string]PrevRefs{},
+		toMerge: newMergeDetails(),
 		errs:    errs,
 	}

@@ -172,7 +165,7 @@ func (w Wrapper) ConsumeBackupCollections(
 		base = previousSnapshots
 	}

-	dirTree, updatedLocations, err := inflateDirTree(
+	dirTree, err := inflateDirTree(
 		ctx,
 		w.c,
 		base,
@@ -180,7 +173,7 @@ func (w Wrapper) ConsumeBackupCollections(
 		globalExcludeSet,
 		progress)
 	if err != nil {
-		return nil, nil, nil, nil, clues.Wrap(err, "building kopia directories")
+		return nil, nil, nil, clues.Wrap(err, "building kopia directories")
 	}

 	s, err := w.makeSnapshotWithRoot(
@@ -190,10 +183,10 @@ func (w Wrapper) ConsumeBackupCollections(
 		tags,
 		progress)
 	if err != nil {
-		return nil, nil, nil, nil, err
+		return nil, nil, nil, err
 	}

-	return s, progress.deets, progress.toMerge, updatedLocations, progress.errs.Failure()
+	return s, progress.deets, progress.toMerge, progress.errs.Failure()
 }

 func (w Wrapper) makeSnapshotWithRoot(
@@ -276,7 +276,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
 		suite.Run(test.name, func() {
 			t := suite.T()

-			stats, deets, _, _, err := suite.w.ConsumeBackupCollections(
+			stats, deets, _, err := suite.w.ConsumeBackupCollections(
 				suite.ctx,
 				prevSnaps,
 				collections,
@@ -423,7 +423,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 			t := suite.T()
 			collections := test.cols()

-			stats, deets, prevShortRefs, _, err := suite.w.ConsumeBackupCollections(
+			stats, deets, prevShortRefs, err := suite.w.ConsumeBackupCollections(
 				suite.ctx,
 				prevSnaps,
 				collections,
@@ -459,13 +459,9 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 				assert.False(t, onedrive.IsMetaFile(entry.RepoRef), "metadata entry in details")
 			}

-			assert.Len(t, prevShortRefs, 0)
-			for _, prevRef := range prevShortRefs {
-				assert.False(
-					t,
-					onedrive.IsMetaFile(prevRef.Repo.String()),
-					"metadata entry in base details")
-			}
+			// Shouldn't have any items to merge because the cached files are metadata
+			// files.
+			assert.Equal(t, 0, prevShortRefs.ItemsToMerge())

 			checkSnapshotTags(
 				t,
@@ -525,7 +521,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 	fp2, err := suite.storePath2.Append(dc2.Names[0], true)
 	require.NoError(t, err, clues.ToCore(err))

-	stats, _, _, _, err := w.ConsumeBackupCollections(
+	stats, _, _, err := w.ConsumeBackupCollections(
 		ctx,
 		nil,
 		[]data.BackupCollection{dc1, dc2},
@@ -644,7 +640,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 		},
 	}

-	stats, deets, _, _, err := suite.w.ConsumeBackupCollections(
+	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
 		nil,
 		collections,
@@ -706,7 +702,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
 			ctx, flush := tester.NewContext()
 			defer flush()

-			s, d, _, _, err := suite.w.ConsumeBackupCollections(
+			s, d, _, err := suite.w.ConsumeBackupCollections(
 				ctx,
 				nil,
 				test.collections,
@@ -866,7 +862,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 		tags[k] = ""
 	}

-	stats, deets, _, _, err := suite.w.ConsumeBackupCollections(
+	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
 		nil,
 		collections,
@@ -1018,7 +1014,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 		}
 	}

-	stats, _, _, _, err := suite.w.ConsumeBackupCollections(
+	stats, _, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
 		[]IncrementalBase{
 			{
@@ -264,7 +264,7 @@ func (op *BackupOperation) do(

 	ctx = clues.Add(ctx, "coll_count", len(cs))

-	writeStats, deets, toMerge, updatedLocs, err := consumeBackupCollections(
+	writeStats, deets, toMerge, err := consumeBackupCollections(
 		ctx,
 		op.kopia,
 		op.account.ID(),
@@ -287,7 +287,6 @@ func (op *BackupOperation) do(
 		detailsStore,
 		mans,
 		toMerge,
-		updatedLocs,
 		deets,
 		op.Errors)
 	if err != nil {
@@ -412,7 +411,7 @@ func consumeBackupCollections(
 	backupID model.StableID,
 	isIncremental bool,
 	errs *fault.Bus,
-) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, *kopia.LocationPrefixMatcher, error) {
+) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error) {
 	complete, closer := observe.MessageWithCompletion(ctx, "Backing up data")
 	defer func() {
 		complete <- struct{}{}
@@ -441,7 +440,7 @@ func consumeBackupCollections(
 		for _, reason := range m.Reasons {
 			pb, err := builderFromReason(ctx, tenantID, reason)
 			if err != nil {
-				return nil, nil, nil, nil, clues.Wrap(err, "getting subtree paths for bases")
+				return nil, nil, nil, clues.Wrap(err, "getting subtree paths for bases")
 			}

 			paths = append(paths, pb)
@@ -477,7 +476,7 @@ func consumeBackupCollections(
 			"base_backup_id", mbID)
 	}

-	kopiaStats, deets, itemsSourcedFromBase, updatedLocs, err := bc.ConsumeBackupCollections(
+	kopiaStats, deets, itemsSourcedFromBase, err := bc.ConsumeBackupCollections(
 		ctx,
 		bases,
 		cs,
@@ -487,10 +486,10 @@ func consumeBackupCollections(
 		errs)
 	if err != nil {
 		if kopiaStats == nil {
-			return nil, nil, nil, nil, err
+			return nil, nil, nil, err
 		}

-		return nil, nil, nil, nil, clues.Stack(err).With(
+		return nil, nil, nil, clues.Stack(err).With(
 			"kopia_errors", kopiaStats.ErrorCount,
 			"kopia_ignored_errors", kopiaStats.IgnoredErrorCount)
 	}
@@ -502,7 +501,7 @@ func consumeBackupCollections(
 			"kopia_ignored_errors", kopiaStats.IgnoredErrorCount)
 	}

-	return kopiaStats, deets, itemsSourcedFromBase, updatedLocs, err
+	return kopiaStats, deets, itemsSourcedFromBase, err
 }

 func matchesReason(reasons []kopia.Reason, p path.Path) bool {
@@ -522,13 +521,12 @@ func mergeDetails(
 	ms *store.Wrapper,
 	detailsStore streamstore.Streamer,
 	mans []*kopia.ManifestEntry,
-	shortRefsFromPrevBackup map[string]kopia.PrevRefs,
-	updatedLocs *kopia.LocationPrefixMatcher,
+	dataFromBackup kopia.DetailsMergeInfoer,
 	deets *details.Builder,
 	errs *fault.Bus,
 ) error {
 	// Don't bother loading any of the base details if there's nothing we need to merge.
-	if len(shortRefsFromPrevBackup) == 0 {
+	if dataFromBackup.ItemsToMerge() == 0 {
 		return nil
 	}

@@ -581,17 +579,16 @@ func mergeDetails(
 			continue
 		}

-		prev, ok := shortRefsFromPrevBackup[rr.ShortRef()]
-		if !ok {
+		pb := rr.ToBuilder()

+		newPath := dataFromBackup.GetNewRepoRef(pb)
+		if newPath == nil {
 			// This entry was not sourced from a base snapshot or cached from a
 			// previous backup, skip it.
 			continue
 		}

-		newPath := prev.Repo
 		// Locations are done by collection RepoRef so remove the item from the
 		// input.
-		newLoc := updatedLocs.LongestPrefix(rr.ToBuilder().Dir().String())
+		newLoc := dataFromBackup.GetNewLocation(pb.Dir())

 		// Fixup paths in the item.
 		item := entry.ItemInfo
@@ -637,10 +634,12 @@ func mergeDetails(
 			"base_item_count_added", manifestAddedEntries)
 	}

-	if addedEntries != len(shortRefsFromPrevBackup) {
+	if addedEntries != dataFromBackup.ItemsToMerge() {
 		return clues.New("incomplete migration of backup details").
 			WithClues(ctx).
-			With("item_count", addedEntries, "expected_item_count", len(shortRefsFromPrevBackup))
+			With(
+				"item_count", addedEntries,
+				"expected_item_count", dataFromBackup.ItemsToMerge())
 	}

 	return nil
@@ -102,12 +102,12 @@ func (mbu mockBackupConsumer) ConsumeBackupCollections(
 	tags map[string]string,
 	buildTreeWithBase bool,
 	errs *fault.Bus,
-) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, *kopia.LocationPrefixMatcher, error) {
+) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error) {
 	if mbu.checkFunc != nil {
 		mbu.checkFunc(bases, cs, tags, buildTreeWithBase)
 	}

-	return &kopia.BackupStats{}, &details.Builder{}, nil, nil, nil
+	return &kopia.BackupStats{}, &details.Builder{}, nil, nil
 }

 // ----- model store for backups
@@ -181,6 +181,47 @@ func (mbs mockBackupStorer) Update(context.Context, model.Schema, model.Model) e
 	return clues.New("not implemented")
 }

+// ----- model store for backups
+
+type mockDetailsMergeInfoer struct {
+	repoRefs map[string]path.Path
+	locs     map[string]*path.Builder
+}
+
+func (m *mockDetailsMergeInfoer) add(oldRef, newRef path.Path, newLoc *path.Builder) {
+	oldPB := oldRef.ToBuilder()
+	// Items are indexed individually.
+	m.repoRefs[oldPB.ShortRef()] = newRef
+
+	if newLoc != nil {
+		// Locations are indexed by directory.
+		m.locs[oldPB.Dir().ShortRef()] = newLoc
+	}
+}
+
+func (m *mockDetailsMergeInfoer) GetNewRepoRef(oldRef *path.Builder) path.Path {
+	return m.repoRefs[oldRef.ShortRef()]
+}
+
+func (m *mockDetailsMergeInfoer) GetNewLocation(oldRef *path.Builder) *path.Builder {
+	return m.locs[oldRef.ShortRef()]
+}
+
+func (m *mockDetailsMergeInfoer) ItemsToMerge() int {
+	if m == nil {
+		return 0
+	}
+
+	return len(m.repoRefs)
+}
+
+func newMockDetailsMergeInfoer() *mockDetailsMergeInfoer {
+	return &mockDetailsMergeInfoer{
+		repoRefs: map[string]path.Path{},
+		locs:     map[string]*path.Builder{},
+	}
+}
+
 // ---------------------------------------------------------------------------
 // helper funcs
 // ---------------------------------------------------------------------------
@@ -669,12 +710,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 	require.NoError(suite.T(), err, clues.ToCore(err))

 	table := []struct {
-		name                         string
-		populatedModels              map[model.StableID]backup.Backup
-		populatedDetails             map[string]*details.Details
-		inputMans                    []*kopia.ManifestEntry
-		inputShortRefsFromPrevBackup map[string]kopia.PrevRefs
-		prefixMatcher                *kopia.LocationPrefixMatcher
+		name             string
+		populatedModels  map[model.StableID]backup.Backup
+		populatedDetails map[string]*details.Details
+		inputMans        []*kopia.ManifestEntry
+		mdm              *mockDetailsMergeInfoer

 		errCheck        assert.ErrorAssertionFunc
 		expectedEntries []*details.DetailsEntry
@@ -686,30 +726,19 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 			expectedEntries: []*details.DetailsEntry{},
 		},
 		{
-			name:                         "EmptyShortRefsFromPrevBackup",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{},
-			errCheck:                     assert.NoError,
+			name:     "EmptyShortRefsFromPrevBackup",
+			mdm:      newMockDetailsMergeInfoer(),
+			errCheck: assert.NoError,
 			// Use empty slice so we don't error out on nil != empty.
 			expectedEntries: []*details.DetailsEntry{},
 		},
 		{
 			name: "BackupIDNotFound",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -723,22 +752,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "DetailsIDNotFound",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -760,32 +778,12 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "BaseMissingItems",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-				itemPath2.ShortRef(): {
-					Repo:     itemPath2,
-					Location: locationPath2,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+				res.add(itemPath2, itemPath2, locationPath2)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				rr, err = itemPath2.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath2)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -811,22 +809,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "TooManyItems",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -858,22 +845,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "BadBaseRepoRef",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath2,
-					Location: locationPath2,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath2, locationPath2)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath2)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -918,25 +894,24 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "BadOneDrivePath",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo: makePath(
-						suite.T(),
-						[]string{
-							itemPath1.Tenant(),
-							path.OneDriveService.String(),
-							itemPath1.ResourceOwner(),
-							path.FilesCategory.String(),
-							"personal",
-							"item1",
-						},
-						true,
-					),
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				p := makePath(
+					suite.T(),
+					[]string{
+						itemPath1.Tenant(),
+						path.OneDriveService.String(),
+						itemPath1.ResourceOwner(),
+						path.FilesCategory.String(),
+						"personal",
+						"item1",
+					},
+					true,
+				)

+				res.add(itemPath1, p, nil)

+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -962,22 +937,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "ItemMerged",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1006,14 +970,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "ItemMergedNoLocation",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo: itemPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
-				return p
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, nil)

+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1042,22 +1003,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "ItemMergedSameLocation",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1086,22 +1036,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "ItemMergedExtraItemsInBase",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1131,22 +1070,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "ItemMoved",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath2,
-					Location: locationPath2,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath2, locationPath2)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath2)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1175,32 +1103,12 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "MultipleBases",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-				itemPath3.ShortRef(): {
-					Repo:     itemPath3,
-					Location: locationPath3,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)
+				res.add(itemPath3, itemPath3, locationPath3)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				rr, err = itemPath3.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath3)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1247,22 +1155,11 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 		},
 		{
 			name: "SomeBasesIncomplete",
-			inputShortRefsFromPrevBackup: map[string]kopia.PrevRefs{
-				itemPath1.ShortRef(): {
-					Repo:     itemPath1,
-					Location: locationPath1,
-				},
-			},
-			prefixMatcher: func() *kopia.LocationPrefixMatcher {
-				p := kopia.NewLocationPrefixMatcher()
+			mdm: func() *mockDetailsMergeInfoer {
+				res := newMockDetailsMergeInfoer()
+				res.add(itemPath1, itemPath1, locationPath1)

-				rr, err := itemPath1.Dir()
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				err = p.Add(rr, locationPath1)
-				require.NoError(suite.T(), err, clues.ToCore(err))

-				return p
+				return res
 			}(),
 			inputMans: []*kopia.ManifestEntry{
 				{
@@ -1322,8 +1219,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 				w,
 				mds,
 				test.inputMans,
-				test.inputShortRefsFromPrevBackup,
-				test.prefixMatcher,
+				test.mdm,
 				&deets,
 				fault.New(true))
 			test.errCheck(t, err, clues.ToCore(err))
@@ -1373,13 +1269,6 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 		Category: itemPath1.Category(),
 	}

-	inputToMerge = map[string]kopia.PrevRefs{
-		itemPath1.ShortRef(): {
-			Repo:     itemPath1,
-			Location: locPath1,
-		},
-	}
-
 	inputMans = []*kopia.ManifestEntry{
 		{
 			Manifest: makeManifest(t, backup1.ID, ""),
@@ -1398,12 +1287,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 		// later = now.Add(42 * time.Minute)
 	)

-	itemDir, err := itemPath1.Dir()
-	require.NoError(t, err, clues.ToCore(err))

-	prefixMatcher := kopia.NewLocationPrefixMatcher()
-	err = prefixMatcher.Add(itemDir, locPath1)
-	require.NoError(suite.T(), err, clues.ToCore(err))
+	mdm := newMockDetailsMergeInfoer()
+	mdm.add(itemPath1, itemPath1, locPath1)

 	itemDetails := makeDetailsEntry(t, itemPath1, locPath1, itemSize, false)
 	// itemDetails.Exchange.Modified = now
@@ -1438,13 +1323,12 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 		deets = details.Builder{}
 	)

-	err = mergeDetails(
+	err := mergeDetails(
 		ctx,
 		w,
 		mds,
 		inputMans,
-		inputToMerge,
-		prefixMatcher,
+		mdm,
 		&deets,
 		fault.New(true))
 	assert.NoError(t, err, clues.ToCore(err))
@@ -37,7 +37,7 @@ type (
 			tags map[string]string,
 			buildTreeWithBase bool,
 			errs *fault.Bus,
-		) (*kopia.BackupStats, *details.Builder, map[string]kopia.PrevRefs, *kopia.LocationPrefixMatcher, error)
+		) (*kopia.BackupStats, *details.Builder, kopia.DetailsMergeInfoer, error)
 	}

 	RestoreProducer interface {
@@ -228,7 +228,7 @@ func write(
 	dbcs []data.BackupCollection,
 	errs *fault.Bus,
 ) (string, error) {
-	backupStats, _, _, _, err := bup.ConsumeBackupCollections(
+	backupStats, _, _, err := bup.ConsumeBackupCollections(
 		ctx,
 		nil,
 		dbcs,