Read items within a collection from kopia on demand (#3506)
Don't read all of the files that we need to restore up front; only read the collections at the beginning. The rest are read via Fetch on demand, at the point where they are restored. I haven't tested for changes in memory consumption, but this brings the time taken for "Enumerating items in repository" down to <5s even for a huge number of files.

---

#### Does this PR need a docs update or release note?

- [ ] ✅ Yes, it's included
- [x] 🕐 Yes, but in a later PR
- [ ] ⛔ No

#### Type of change

- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* https://github.com/alcionai/corso/issues/3011
* closes https://github.com/alcionai/corso/issues/3440
* closes https://github.com/alcionai/corso/issues/3537

#### Test Plan

- [x] 💪 Manual
- [ ] ⚡ Unit test
- [x] 💚 E2E
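For context, the shape of the on-demand read is roughly the following. This is a minimal sketch, not the actual restore code: `names` and `restoreItem` are hypothetical stand-ins, while `data.RestoreCollection`, `FullPath`, `FetchItemByName`, and the `fault`/`clues` usage match the interface shapes visible in the diff below.

```go
// Minimal sketch of on-demand restore, assuming a hypothetical restoreItem
// sink and a precomputed name list. Only the collection handles are held up
// front; each item's bytes are pulled from the repository at restore time.
func restoreOnDemand(
    ctx context.Context,
    colls []data.RestoreCollection,
    names map[string][]string, // collection path -> item names (assumed shape)
    errs *fault.Bus,
) error {
    for _, c := range colls {
        for _, name := range names[c.FullPath().String()] {
            // the item is fetched from kopia only now, instead of being
            // enumerated and read during collection setup
            item, err := c.FetchItemByName(ctx, name)
            if err != nil {
                errs.AddRecoverable(clues.Wrap(err, "fetching item on demand"))
                continue
            }

            if err := restoreItem(ctx, item); err != nil { // hypothetical sink
                errs.AddRecoverable(clues.Stack(err))
            }
        }
    }

    return errs.Failure()
}
```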
Parent: 7e154bfe51
Commit: 7d1d9295eb
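The diff below threads a new `canUsePreviousBackup bool` return through `ProduceBackupCollections` and each service's `DataCollections`, and reworks `parseMetadataCollections` (Exchange) and `deserializeMetadata` (OneDrive) to read metadata items against a local fault bus: a failed metadata read no longer aborts the backup, it only disables use of the previous backup. A distilled sketch of that containment pattern, with the decode loop elided behind a hypothetical `parseItems` helper:

```go
// Sketch of the error-containment pattern applied in parseMetadataCollections
// and deserializeMetadata below; parseItems is a hypothetical stand-in for
// the real decode loop.
func readMetadata(ctx context.Context, colls []data.RestoreCollection) (CatDeltaPaths, bool, error) {
    // local bus: errors from metadata items should not stop the backup,
    // but they do prevent us from using previous backups
    errs := fault.New(true)

    cdp := parseItems(ctx, colls, errs)

    if errs.Failure() != nil {
        logger.CtxErr(ctx, errs.Failure()).Info("reading metadata collection items")

        // empty result, false, and no error: the caller proceeds with a
        // full (non-incremental) backup instead of failing the run
        return CatDeltaPaths{}, false, nil
    }

    return cdp, true, nil
}
```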
```diff
@@ -42,7 +42,7 @@ func (gc *GraphConnector) ProduceBackupCollections(
     lastBackupVersion int,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, prefixmatcher.StringSetReader, error) {
+) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
     ctx, end := diagnostics.Span(
         ctx,
         "gc:produceBackupCollections",
@@ -57,7 +57,7 @@ func (gc *GraphConnector) ProduceBackupCollections(

     err := verifyBackupInputs(sels, gc.IDNameLookup.IDs())
     if err != nil {
-        return nil, nil, clues.Stack(err).WithClues(ctx)
+        return nil, nil, false, clues.Stack(err).WithClues(ctx)
     }

     serviceEnabled, canMakeDeltaQueries, err := checkServiceEnabled(
@@ -66,16 +66,17 @@ func (gc *GraphConnector) ProduceBackupCollections(
         path.ServiceType(sels.Service),
         sels.DiscreteOwner)
     if err != nil {
-        return nil, nil, err
+        return nil, nil, false, err
     }

     if !serviceEnabled {
-        return []data.BackupCollection{}, nil, nil
+        return []data.BackupCollection{}, nil, false, nil
     }

     var (
         colls []data.BackupCollection
         ssmb  *prefixmatcher.StringSetMatcher
+        canUsePreviousBackup bool
     )

     if !canMakeDeltaQueries {
@@ -86,7 +87,7 @@ func (gc *GraphConnector) ProduceBackupCollections(

     switch sels.Service {
     case selectors.ServiceExchange:
-        colls, ssmb, err = exchange.DataCollections(
+        colls, ssmb, canUsePreviousBackup, err = exchange.DataCollections(
             ctx,
             gc.AC,
             sels,
@@ -97,11 +98,11 @@ func (gc *GraphConnector) ProduceBackupCollections(
             ctrlOpts,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }

     case selectors.ServiceOneDrive:
-        colls, ssmb, err = onedrive.DataCollections(
+        colls, ssmb, canUsePreviousBackup, err = onedrive.DataCollections(
             ctx,
             gc.AC,
             sels,
@@ -113,11 +114,11 @@ func (gc *GraphConnector) ProduceBackupCollections(
             ctrlOpts,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }

     case selectors.ServiceSharePoint:
-        colls, ssmb, err = sharepoint.DataCollections(
+        colls, ssmb, canUsePreviousBackup, err = sharepoint.DataCollections(
             ctx,
             gc.AC,
             sels,
@@ -128,11 +129,11 @@ func (gc *GraphConnector) ProduceBackupCollections(
             ctrlOpts,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }

     default:
-        return nil, nil, clues.Wrap(clues.New(sels.Service.String()), "service not supported").WithClues(ctx)
+        return nil, nil, false, clues.Wrap(clues.New(sels.Service.String()), "service not supported").WithClues(ctx)
     }

     for _, c := range colls {
@@ -147,7 +148,7 @@ func (gc *GraphConnector) ProduceBackupCollections(
         }
     }

-    return colls, ssmb, nil
+    return colls, ssmb, canUsePreviousBackup, nil
 }

 // IsBackupRunnable verifies that the users provided has the services enabled and
```
```diff
@@ -127,7 +127,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
             ctrlOpts := control.Defaults()
             ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries

-            collections, excludes, err := exchange.DataCollections(
+            collections, excludes, canUsePreviousBackup, err := exchange.DataCollections(
                 ctx,
                 suite.ac,
                 sel,
@@ -138,6 +138,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
                 ctrlOpts,
                 fault.New(true))
             require.NoError(t, err, clues.ToCore(err))
+            assert.True(t, canUsePreviousBackup, "can use previous backup")
             assert.True(t, excludes.Empty())

             for range collections {
@@ -237,7 +238,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner() {
             ctx, flush := tester.NewContext(t)
             defer flush()

-            collections, excludes, err := connector.ProduceBackupCollections(
+            collections, excludes, canUsePreviousBackup, err := connector.ProduceBackupCollections(
                 ctx,
                 test.getSelector(t),
                 test.getSelector(t),
@@ -246,6 +247,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner() {
                 control.Defaults(),
                 fault.New(true))
             assert.Error(t, err, clues.ToCore(err))
+            assert.False(t, canUsePreviousBackup, "can use previous backup")
             assert.Empty(t, collections)
             assert.Nil(t, excludes)
         })
@@ -295,7 +297,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {

             sel := test.getSelector()

-            collections, excludes, err := sharepoint.DataCollections(
+            collections, excludes, canUsePreviousBackup, err := sharepoint.DataCollections(
                 ctx,
                 suite.ac,
                 sel,
@@ -306,6 +308,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
                 control.Defaults(),
                 fault.New(true))
             require.NoError(t, err, clues.ToCore(err))
+            assert.True(t, canUsePreviousBackup, "can use previous backup")
             // Not expecting excludes as this isn't an incremental backup.
             assert.True(t, excludes.Empty())

@@ -381,7 +384,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {

     sel.SetDiscreteOwnerIDName(id, name)

-    cols, excludes, err := gc.ProduceBackupCollections(
+    cols, excludes, canUsePreviousBackup, err := gc.ProduceBackupCollections(
         ctx,
         inMock.NewProvider(id, name),
         sel.Selector,
@@ -390,6 +393,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
         control.Defaults(),
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
     require.Len(t, cols, 2) // 1 collection, 1 path prefix directory to ensure the root path exists.
     // No excludes yet as this isn't an incremental backup.
     assert.True(t, excludes.Empty())
@@ -427,7 +431,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {

     sel.SetDiscreteOwnerIDName(id, name)

-    cols, excludes, err := gc.ProduceBackupCollections(
+    cols, excludes, canUsePreviousBackup, err := gc.ProduceBackupCollections(
         ctx,
         inMock.NewProvider(id, name),
         sel.Selector,
@@ -436,6 +440,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
         control.Defaults(),
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
     assert.Less(t, 0, len(cols))
     // No excludes yet as this isn't an incremental backup.
     assert.True(t, excludes.Empty())
```
```diff
@@ -14,6 +14,7 @@ import (
     "github.com/alcionai/corso/src/internal/observe"
     "github.com/alcionai/corso/src/pkg/control"
     "github.com/alcionai/corso/src/pkg/fault"
+    "github.com/alcionai/corso/src/pkg/logger"
     "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/selectors"
     "github.com/alcionai/corso/src/pkg/services/m365/api"
@@ -64,8 +65,7 @@ type DeltaPath struct {
 func parseMetadataCollections(
     ctx context.Context,
     colls []data.RestoreCollection,
-    errs *fault.Bus,
-) (CatDeltaPaths, error) {
+) (CatDeltaPaths, bool, error) {
     // cdp stores metadata
     cdp := CatDeltaPaths{
         path.ContactsCategory: {},
@@ -81,6 +81,10 @@ func parseMetadataCollections(
         path.EventsCategory: {},
     }

+    // errors from metadata items should not stop the backup,
+    // but it should prevent us from using previous backups
+    errs := fault.New(true)
+
     for _, coll := range colls {
         var (
             breakLoop bool
@@ -91,10 +95,10 @@ func parseMetadataCollections(
         for {
             select {
             case <-ctx.Done():
-                return nil, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)
+                return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)

             case item, ok := <-items:
-                if !ok {
+                if !ok || errs.Failure() != nil {
                     breakLoop = true
                     break
                 }
@@ -106,13 +110,13 @@ func parseMetadataCollections(

                 err := json.NewDecoder(item.ToReader()).Decode(&m)
                 if err != nil {
-                    return nil, clues.New("decoding metadata json").WithClues(ctx)
+                    return nil, false, clues.New("decoding metadata json").WithClues(ctx)
                 }

                 switch item.UUID() {
                 case graph.PreviousPathFileName:
                     if _, ok := found[category]["path"]; ok {
-                        return nil, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
+                        return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
                     }

                     for k, p := range m {
@@ -123,7 +127,7 @@ func parseMetadataCollections(

                 case graph.DeltaURLsFileName:
                     if _, ok := found[category]["delta"]; ok {
-                        return nil, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
+                        return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
                     }

                     for k, d := range m {
@@ -142,6 +146,16 @@ func parseMetadataCollections(
         }
     }

+    if errs.Failure() != nil {
+        logger.CtxErr(ctx, errs.Failure()).Info("reading metadata collection items")
+
+        return CatDeltaPaths{
+            path.ContactsCategory: {},
+            path.EmailCategory:    {},
+            path.EventsCategory:   {},
+        }, false, nil
+    }
+
     // Remove any entries that contain a path or a delta, but not both.
     // That metadata is considered incomplete, and needs to incur a
     // complete backup on the next run.
@@ -153,7 +167,7 @@ func parseMetadataCollections(
         }
     }

-    return cdp, nil
+    return cdp, true, nil
 }

 // DataCollections returns a DataCollection which the caller can
@@ -168,10 +182,10 @@ func DataCollections(
     su support.StatusUpdater,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
+) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
     eb, err := selector.ToExchangeBackup()
     if err != nil {
-        return nil, nil, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
+        return nil, nil, false, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
     }

     var (
@@ -187,9 +201,9 @@ func DataCollections(
         graph.InitializeConcurrencyLimiter(ctrlOpts.Parallelism.ItemFetch)
     }

-    cdps, err := parseMetadataCollections(ctx, metadata, errs)
+    cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, metadata)
     if err != nil {
-        return nil, nil, err
+        return nil, nil, false, err
     }

     for _, scope := range eb.Scopes() {
@@ -228,13 +242,13 @@ func DataCollections(
             su,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }

         collections = append(collections, baseCols...)
     }

-    return collections, nil, el.Failure()
+    return collections, nil, canUsePreviousBackup, el.Failure()
 }

 // createCollections - utility function that retrieves M365
```
```diff
@@ -2,6 +2,7 @@ package exchange

 import (
     "bytes"
+    "context"
     "sync"
     "testing"

@@ -45,6 +46,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
         name        string
         data        []fileValues
         expect      map[string]DeltaPath
+        canUsePreviousBackup bool
         expectError assert.ErrorAssertionFunc
     }{
         {
@@ -53,6 +55,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                 {graph.DeltaURLsFileName, "delta-link"},
             },
             expect:      map[string]DeltaPath{},
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
         {
@@ -61,6 +64,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                 {graph.DeltaURLsFileName, "delta-link"},
                 {graph.DeltaURLsFileName, "delta-link-2"},
             },
+            canUsePreviousBackup: false,
             expectError: assert.Error,
         },
         {
@@ -74,6 +78,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
         {
@@ -82,6 +87,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                 {graph.PreviousPathFileName, "prev-path"},
                 {graph.PreviousPathFileName, "prev-path-2"},
             },
+            canUsePreviousBackup: false,
             expectError: assert.Error,
         },
         {
@@ -96,6 +102,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
         {
@@ -105,6 +112,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                 {graph.PreviousPathFileName, ""},
             },
             expect:      map[string]DeltaPath{},
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
         {
@@ -119,6 +127,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
         {
@@ -133,6 +142,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
         {
@@ -147,6 +157,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
         {
@@ -164,6 +175,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
+            canUsePreviousBackup: true,
             expectError: assert.NoError,
         },
     }
@@ -191,11 +203,13 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
             )
             require.NoError(t, err, clues.ToCore(err))

-            cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
+            cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
                 data.NoFetchRestoreCollection{Collection: coll},
-            }, fault.New(true))
+            })
             test.expectError(t, err, clues.ToCore(err))

+            assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")
+
             emails := cdps[path.EmailCategory]

             assert.Len(t, emails, len(test.expect))
@@ -208,6 +222,52 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
     }
 }

+type failingColl struct {
+    t *testing.T
+}
+
+func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
+    ic := make(chan data.Stream)
+    defer close(ic)
+
+    errs.AddRecoverable(assert.AnError)
+
+    return ic
+}
+
+func (f failingColl) FullPath() path.Path {
+    tmp, err := path.Build(
+        "tenant",
+        "user",
+        path.ExchangeService,
+        path.EmailCategory,
+        false,
+        "inbox")
+    require.NoError(f.t, err, clues.ToCore(err))
+
+    return tmp
+}
+
+func (f failingColl) FetchItemByName(context.Context, string) (data.Stream, error) {
+    // no fetch calls will be made
+    return nil, nil
+}
+
+// This check is to ensure that we don't error out, but still return
+// canUsePreviousBackup as false on read errors
+func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections_ReadFailure() {
+    t := suite.T()
+
+    ctx, flush := tester.NewContext(t)
+    defer flush()
+
+    fc := failingColl{t}
+
+    _, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{fc})
+    require.NoError(t, err)
+    require.False(t, canUsePreviousBackup)
+}
+
 // ---------------------------------------------------------------------------
 // Integration tests
 // ---------------------------------------------------------------------------
@@ -401,10 +461,11 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() {

             require.NotNil(t, metadata, "collections contains a metadata collection")

-            cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
+            cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
                 data.NoFetchRestoreCollection{Collection: metadata},
-            }, fault.New(true))
+            })
             require.NoError(t, err, clues.ToCore(err))
+            assert.True(t, canUsePreviousBackup, "can use previous backup")

             dps := cdps[test.scope.Category().PathType()]

```
```diff
@@ -425,10 +425,9 @@ func checkMetadata(
     expect DeltaPaths,
     c data.BackupCollection,
 ) {
-    catPaths, err := parseMetadataCollections(
+    catPaths, _, err := parseMetadataCollections(
         ctx,
-        []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}},
-        fault.New(true))
+        []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}})
     if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) {
         return
     }
```
```diff
@@ -1083,7 +1083,7 @@ func testRestoreFolderNamedFolderRegression(
         collectionsLatest: expected,
     }

-    runRestoreTestWithVerion(
+    runRestoreTestWithVersion(
         t,
         testData,
         suite.Tenant(),
```
```diff
@@ -487,7 +487,7 @@ func runBackupAndCompare(
     t.Logf("Selective backup of %s\n", backupSel)

     start := time.Now()
-    dcs, excludes, err := backupGC.ProduceBackupCollections(
+    dcs, excludes, canUsePreviousBackup, err := backupGC.ProduceBackupCollections(
         ctx,
         backupSel,
         backupSel,
@@ -496,6 +496,7 @@ func runBackupAndCompare(
         config.Opts,
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
     // No excludes yet because this isn't an incremental backup.
     assert.True(t, excludes.Empty())

@@ -564,7 +565,7 @@ func runRestoreBackupTest(
 }

 // runRestoreTest restores with data using the test's backup version
-func runRestoreTestWithVerion(
+func runRestoreTestWithVersion(
     t *testing.T,
     test restoreBackupInfoMultiVersion,
     tenant string,
@@ -1059,7 +1060,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
         backupSel := backupSelectorForExpected(t, test.service, expectedDests)
         t.Log("Selective backup of", backupSel)

-        dcs, excludes, err := backupGC.ProduceBackupCollections(
+        dcs, excludes, canUsePreviousBackup, err := backupGC.ProduceBackupCollections(
             ctx,
             backupSel,
             backupSel,
@@ -1071,6 +1072,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
             },
             fault.New(true))
         require.NoError(t, err, clues.ToCore(err))
+        assert.True(t, canUsePreviousBackup, "can use previous backup")
         // No excludes yet because this isn't an incremental backup.
         assert.True(t, excludes.Empty())

@@ -1214,7 +1216,7 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections

             backupSel.SetDiscreteOwnerIDName(id, name)

-            dcs, excludes, err := backupGC.ProduceBackupCollections(
+            dcs, excludes, canUsePreviousBackup, err := backupGC.ProduceBackupCollections(
                 ctx,
                 inMock.NewProvider(id, name),
                 backupSel,
@@ -1226,6 +1228,7 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections
                 },
                 fault.New(true))
             require.NoError(t, err)
+            assert.True(t, canUsePreviousBackup, "can use previous backup")
             // No excludes yet because this isn't an incremental backup.
             assert.True(t, excludes.Empty())

```
```diff
@@ -38,9 +38,10 @@ func (gc GraphConnector) ProduceBackupCollections(
 ) (
     []data.BackupCollection,
     prefixmatcher.StringSetReader,
+    bool,
     error,
 ) {
-    return gc.Collections, gc.Exclude, gc.Err
+    return gc.Collections, gc.Exclude, gc.Err == nil, gc.Err
 }

 func (gc GraphConnector) IsBackupRunnable(
```
```diff
@@ -92,8 +92,7 @@ func NewCollections(
 func deserializeMetadata(
     ctx context.Context,
     cols []data.RestoreCollection,
-    errs *fault.Bus,
-) (map[string]string, map[string]map[string]string, error) {
+) (map[string]string, map[string]map[string]string, bool, error) {
     logger.Ctx(ctx).Infow(
         "deserialzing previous backup metadata",
         "num_collections", len(cols))
@@ -101,11 +100,11 @@ func deserializeMetadata(
     var (
         prevDeltas  = map[string]string{}
         prevFolders = map[string]map[string]string{}
-        el          = errs.Local()
+        errs        = fault.New(true) // metadata item reads should not fail backup
     )

     for _, col := range cols {
-        if el.Failure() != nil {
+        if errs.Failure() != nil {
             break
         }

@@ -114,7 +113,7 @@ func deserializeMetadata(
         for breakLoop := false; !breakLoop; {
             select {
             case <-ctx.Done():
-                return nil, nil, clues.Wrap(ctx.Err(), "deserialzing previous backup metadata").WithClues(ctx)
+                return nil, nil, false, clues.Wrap(ctx.Err(), "deserialzing previous backup metadata").WithClues(ctx)

             case item, ok := <-items:
                 if !ok {
@@ -154,7 +153,7 @@ func deserializeMetadata(
                 // these cases. We can make the logic for deciding when to continue vs.
                 // when to fail less strict in the future if needed.
                 if err != nil {
-                    return nil, nil, clues.Stack(err).WithClues(ictx)
+                    return nil, nil, false, clues.Stack(err).WithClues(ictx)
                 }
             }
         }
@@ -186,7 +185,14 @@ func deserializeMetadata(
         }
     }

-    return prevDeltas, prevFolders, el.Failure()
+    // if reads from items failed, return empty but no error
+    if errs.Failure() != nil {
+        logger.CtxErr(ctx, errs.Failure()).Info("reading metadata collection items")
+
+        return map[string]string{}, map[string]map[string]string{}, false, nil
+    }
+
+    return prevDeltas, prevFolders, true, nil
 }

 var errExistingMapping = clues.New("mapping already exists for same drive ID")
@@ -229,10 +235,10 @@ func (c *Collections) Get(
     prevMetadata []data.RestoreCollection,
     ssmb *prefixmatcher.StringSetMatchBuilder,
     errs *fault.Bus,
-) ([]data.BackupCollection, error) {
-    prevDeltas, oldPathsByDriveID, err := deserializeMetadata(ctx, prevMetadata, errs)
+) ([]data.BackupCollection, bool, error) {
+    prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
     if err != nil {
-        return nil, err
+        return nil, false, err
     }

     driveTombstones := map[string]struct{}{}
@@ -249,7 +255,7 @@ func (c *Collections) Get(

     drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
     if err != nil {
-        return nil, err
+        return nil, false, err
     }

     var (
@@ -294,7 +300,7 @@ func (c *Collections) Get(
             prevDelta,
             errs)
         if err != nil {
-            return nil, err
+            return nil, false, err
         }

         // Used for logging below.
@@ -332,7 +338,7 @@ func (c *Collections) Get(

             p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID, c.resourceOwner)
             if err != nil {
-                return nil, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
+                return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
             }

             ssmb.Add(p.String(), excluded)
@@ -360,7 +366,7 @@ func (c *Collections) Get(
             prevPath, err := path.FromDataLayerPath(p, false)
             if err != nil {
                 err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
-                return nil, err
+                return nil, false, err
             }

             col, err := NewCollection(
@@ -373,7 +379,7 @@ func (c *Collections) Get(
                 CollectionScopeUnknown,
                 true)
             if err != nil {
-                return nil, clues.Wrap(err, "making collection").WithClues(ictx)
+                return nil, false, clues.Wrap(err, "making collection").WithClues(ictx)
             }

             c.CollectionMap[driveID][fldID] = col
@@ -395,7 +401,7 @@ func (c *Collections) Get(
     for driveID := range driveTombstones {
         prevDrivePath, err := c.handler.PathPrefix(c.tenantID, c.resourceOwner, driveID)
         if err != nil {
-            return nil, clues.Wrap(err, "making drive tombstone for previous path").WithClues(ctx)
+            return nil, false, clues.Wrap(err, "making drive tombstone for previous path").WithClues(ctx)
         }

         coll, err := NewCollection(
@@ -408,7 +414,7 @@ func (c *Collections) Get(
             CollectionScopeUnknown,
             true)
         if err != nil {
-            return nil, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
+            return nil, false, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
         }

         collections = append(collections, coll)
@@ -436,7 +442,7 @@ func (c *Collections) Get(
         collections = append(collections, md)
     }

-    return collections, nil
+    return collections, canUsePreviousBackup, nil
 }

 func updateCollectionPaths(
```
```diff
@@ -806,6 +806,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
         cols           []func() []graph.MetadataCollectionEntry
         expectedDeltas map[string]string
         expectedPaths  map[string]map[string]string
+        canUsePreviousBackup bool
         errCheck       assert.ErrorAssertionFunc
     }{
         {
@@ -836,6 +837,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
                     folderID1: path1,
                 },
             },
+            canUsePreviousBackup: true,
             errCheck: assert.NoError,
         },
         {
@@ -852,6 +854,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
             },
             expectedDeltas: map[string]string{},
             expectedPaths:  map[string]map[string]string{},
+            canUsePreviousBackup: true,
             errCheck: assert.NoError,
         },
         {
@@ -876,6 +879,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
                     folderID1: path1,
                 },
             },
+            canUsePreviousBackup: true,
             errCheck: assert.NoError,
         },
         {
@@ -901,6 +905,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
             },
             expectedDeltas: map[string]string{},
             expectedPaths:  map[string]map[string]string{driveID1: {}},
+            canUsePreviousBackup: true,
             errCheck: assert.NoError,
         },
         {
@@ -934,6 +939,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
                     folderID1: path1,
                 },
             },
+            canUsePreviousBackup: true,
             errCheck: assert.NoError,
         },
         {
@@ -984,6 +990,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
                     folderID2: path2,
                 },
             },
+            canUsePreviousBackup: true,
             errCheck: assert.NoError,
         },
         {
@@ -1000,6 +1007,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
                     }
                 },
             },
+            canUsePreviousBackup: false,
             errCheck: assert.Error,
         },
         {
@@ -1036,6 +1044,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
                     folderID1: path1,
                 },
             },
+            canUsePreviousBackup: true,
             errCheck: assert.NoError,
         },
         {
@@ -1072,6 +1081,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
             },
             expectedDeltas: nil,
             expectedPaths:  nil,
+            canUsePreviousBackup: false,
             errCheck: assert.Error,
         },
         {
@@ -1104,6 +1114,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
             },
             expectedDeltas: nil,
             expectedPaths:  nil,
+            canUsePreviousBackup: false,
             errCheck: assert.Error,
         },
     }
@@ -1130,8 +1141,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
                 cols = append(cols, data.NoFetchRestoreCollection{Collection: mc})
             }

-            deltas, paths, err := deserializeMetadata(ctx, cols, fault.New(true))
+            deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols)
             test.errCheck(t, err)
+            assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")

             assert.Equal(t, test.expectedDeltas, deltas, "deltas")
             assert.Equal(t, test.expectedPaths, paths, "paths")
@@ -1139,6 +1151,34 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
     }
 }

+type failingColl struct{}
+
+func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
+    ic := make(chan data.Stream)
+    defer close(ic)
+
+    errs.AddRecoverable(assert.AnError)
+
+    return ic
+}
+func (f failingColl) FullPath() path.Path { return nil }
+func (f failingColl) FetchItemByName(context.Context, string) (data.Stream, error) { return nil, nil }
+
+// This check is to ensure that we don't error out, but still return
+// canUsePreviousBackup as false on read errors
+func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata_ReadFailure() {
+    t := suite.T()
+
+    ctx, flush := tester.NewContext(t)
+    defer flush()
+
+    fc := failingColl{}
+
+    _, _, canUsePreviousBackup, err := deserializeMetadata(ctx, []data.RestoreCollection{fc})
+    require.NoError(t, err)
+    require.False(t, canUsePreviousBackup)
+}
+
 type mockDeltaPageLinker struct {
     link  *string
     delta *string
@@ -1245,6 +1285,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
         name   string
         drives []models.Driveable
         items  map[string][]deltaPagerResult
+        canUsePreviousBackup bool
         errCheck assert.ErrorAssertionFunc
         prevFolderPaths map[string]map[string]string
         // Collection name -> set of item IDs. We can't check item data because
@@ -1273,6 +1314,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {"root": rootFolderPath1},
@@ -1304,6 +1346,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
        errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {"root": rootFolderPath1},
@@ -1336,6 +1379,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{},
         expectedCollections: map[string]map[data.CollectionState][]string{
@@ -1373,6 +1417,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{},
         expectedCollections: map[string]map[data.CollectionState][]string{
@@ -1410,6 +1455,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {
@@ -1448,6 +1494,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -1492,6 +1539,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -1543,6 +1591,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -1604,6 +1653,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -1647,6 +1697,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: false,
         errCheck: assert.Error,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -1673,6 +1724,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         expectedCollections: map[string]map[data.CollectionState][]string{
             rootFolderPath1: {data.NotMovedState: {"file"}},
@@ -1715,6 +1767,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         expectedCollections: map[string]map[data.CollectionState][]string{
             rootFolderPath1: {data.NotMovedState: {"file"}},
@@ -1757,6 +1810,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {
@@ -1799,6 +1853,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {
@@ -1845,6 +1900,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {
@@ -1901,6 +1957,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -1953,6 +2010,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {
@@ -1999,6 +2057,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {
@@ -2041,6 +2100,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {
@@ -2086,6 +2146,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -2128,6 +2189,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -2165,6 +2227,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -2199,6 +2262,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {},
@@ -2232,6 +2296,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
                 },
             },
         },
+        canUsePreviousBackup: true,
         errCheck: assert.NoError,
         prevFolderPaths: map[string]map[string]string{
             driveID1: {"root": rootFolderPath1},
@@ -2310,8 +2375,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {

         delList := prefixmatcher.NewStringSetBuilder()

-        cols, err := c.Get(ctx, prevMetadata, delList, errs)
+        cols, canUsePreviousBackup, err := c.Get(ctx, prevMetadata, delList, errs)
         test.errCheck(t, err)
+        assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")
         assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()))

         if err != nil {
@@ -2328,12 +2394,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
         }

         if folderPath == metadataPath.String() {
-            deltas, paths, err := deserializeMetadata(
+            deltas, paths, _, err := deserializeMetadata(
                 ctx,
                 []data.RestoreCollection{
                     data.NoFetchRestoreCollection{Collection: baseCol},
-                },
-                fault.New(true))
+                })
             if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
                 continue
             }
```
@ -44,10 +44,10 @@ func DataCollections(
|
|||||||
su support.StatusUpdater,
|
su support.StatusUpdater,
|
||||||
ctrlOpts control.Options,
|
ctrlOpts control.Options,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
|
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
|
||||||
odb, err := selector.ToOneDriveBackup()
|
odb, err := selector.ToOneDriveBackup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, clues.Wrap(err, "parsing selector").WithClues(ctx)
|
return nil, nil, false, clues.Wrap(err, "parsing selector").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -55,6 +55,8 @@ func DataCollections(
|
|||||||
categories = map[path.CategoryType]struct{}{}
|
categories = map[path.CategoryType]struct{}{}
|
||||||
collections = []data.BackupCollection{}
|
collections = []data.BackupCollection{}
|
||||||
ssmb = prefixmatcher.NewStringSetBuilder()
|
ssmb = prefixmatcher.NewStringSetBuilder()
|
||||||
|
odcs []data.BackupCollection
|
||||||
|
canUsePreviousBackup bool
|
||||||
)
|
)
|
||||||
|
|
||||||
// for each scope that includes oneDrive items, get all
|
// for each scope that includes oneDrive items, get all
|
||||||
@ -73,7 +75,7 @@ func DataCollections(
|
|||||||
su,
|
su,
|
||||||
ctrlOpts)
|
ctrlOpts)
|
||||||
|
|
||||||
odcs, err := nc.Get(ctx, metadata, ssmb, errs)
|
odcs, canUsePreviousBackup, err = nc.Get(ctx, metadata, ssmb, errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
|
el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
|
||||||
}
|
}
|
||||||
@ -90,7 +92,7 @@ func DataCollections(
|
|||||||
su,
|
su,
|
||||||
ctrlOpts)
|
ctrlOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
collections = append(collections, mcs...)
|
collections = append(collections, mcs...)
|
||||||
@ -106,13 +108,13 @@ func DataCollections(
|
|||||||
su,
|
su,
|
||||||
errs)
|
errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
collections = append(collections, baseCols...)
|
collections = append(collections, baseCols...)
|
||||||
}
|
}
|
||||||
|
|
||||||
return collections, ssmb.ToReader(), el.Failure()
|
return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
|
||||||
}
|
}
|
||||||
|
|
||||||
// adds data migrations to the collection set.
|
// adds data migrations to the collection set.
|
||||||
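A note on the hunks above (an editorial aside, not part of the commit): `odcs` and `canUsePreviousBackup` are hoisted into the outer `var` block and later assigned with `=` rather than `:=`, so the values set inside the scope loop survive to the final `return`. A minimal standalone sketch of why that matters (hypothetical names, not the repository's code):

```go
package main

import "fmt"

// get stands in for a per-scope producer like nc.Get: it returns
// collections plus a flag saying the previous backup is usable.
func get() ([]string, bool, error) {
	return []string{"item"}, true, nil
}

func main() {
	var (
		collections          []string
		odcs                 []string
		canUsePreviousBackup bool
		err                  error
	)

	for i := 0; i < 2; i++ {
		// '=' assigns into the variables declared above. Writing ':='
		// here would declare shadowed copies scoped to the loop body,
		// and the outer canUsePreviousBackup would still be false when
		// it is finally returned to the caller.
		odcs, canUsePreviousBackup, err = get()
		if err != nil {
			continue
		}

		collections = append(collections, odcs...)
	}

	fmt.Println(len(collections), canUsePreviousBackup) // 2 true
}
```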
@@ -471,7 +471,7 @@ func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() {

     ssmb := prefixmatcher.NewStringSetBuilder()

-    odcs, err := colls.Get(ctx, nil, ssmb, fault.New(true))
+    odcs, _, err := colls.Get(ctx, nil, ssmb, fault.New(true))
     assert.NoError(t, err, clues.ToCore(err))
     // Don't expect excludes as this isn't an incremental backup.
     assert.True(t, ssmb.Empty())
@@ -38,10 +38,10 @@ func DataCollections(
     su statusUpdater,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
+) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
     b, err := selector.ToSharePointBackup()
     if err != nil {
-        return nil, nil, clues.Wrap(err, "sharePointDataCollection: parsing selector")
+        return nil, nil, false, clues.Wrap(err, "sharePointDataCollection: parsing selector")
     }

     ctx = clues.Add(

@@ -54,6 +54,7 @@ func DataCollections(
     collections = []data.BackupCollection{}
     categories  = map[path.CategoryType]struct{}{}
     ssmb        = prefixmatcher.NewStringSetBuilder()
+    canUsePreviousBackup bool
 )

 for _, scope := range b.Scopes() {

@@ -83,8 +84,12 @@ func DataCollections(
             continue
         }

+        // Lists don't make use of previous metadata
+        // TODO: Revisit when we add support of lists
+        canUsePreviousBackup = true
+
     case path.LibrariesCategory:
-        spcs, err = collectLibraries(
+        spcs, canUsePreviousBackup, err = collectLibraries(
             ctx,
             ac.Drives(),
             creds.AzureTenantID,

@@ -113,6 +118,10 @@ func DataCollections(
             el.AddRecoverable(err)
             continue
         }
+
+        // Lists don't make use of previous metadata
+        // TODO: Revisit when we add support of pages
+        canUsePreviousBackup = true
     }

     collections = append(collections, spcs...)

@@ -132,13 +141,13 @@ func DataCollections(
             su.UpdateStatus,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }

         collections = append(collections, baseCols...)
     }

-    return collections, ssmb.ToReader(), el.Failure()
+    return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
 }

 func collectLists(

@@ -205,7 +214,7 @@ func collectLibraries(
     updater statusUpdater,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, error) {
+) ([]data.BackupCollection, bool, error) {
     logger.Ctx(ctx).Debug("creating SharePoint Library collections")

     var (

@@ -219,12 +228,12 @@ func collectLibraries(
             ctrlOpts)
     )

-    odcs, err := colls.Get(ctx, metadata, ssmb, errs)
+    odcs, canUsePreviousBackup, err := colls.Get(ctx, metadata, ssmb, errs)
     if err != nil {
-        return nil, graph.Wrap(ctx, err, "getting library")
+        return nil, false, graph.Wrap(ctx, err, "getting library")
     }

-    return append(collections, odcs...), nil
+    return append(collections, odcs...), canUsePreviousBackup, nil
 }

 // collectPages constructs a sharepoint Collections struct and Get()s the associated
@@ -20,38 +20,48 @@ var (

 type kopiaDataCollection struct {
     path    path.Path
-    streams []data.Stream
     dir     fs.Directory
+    items   []string
     counter ByteCounter
     expectedVersion uint32
 }

-func (kdc *kopiaDataCollection) addStream(
-    ctx context.Context,
-    name string,
-) error {
-    s, err := kdc.FetchItemByName(ctx, name)
-    if err != nil {
-        return err
-    }
-
-    kdc.streams = append(kdc.streams, s)
-
-    return nil
-}
-
 func (kdc *kopiaDataCollection) Items(
     ctx context.Context,
-    _ *fault.Bus, // unused, just matching the interface
+    errs *fault.Bus,
 ) <-chan data.Stream {
-    res := make(chan data.Stream)
+    var (
+        res       = make(chan data.Stream)
+        el        = errs.Local()
+        loadCount = 0
+    )

     go func() {
         defer close(res)

-        for _, s := range kdc.streams {
+        for _, item := range kdc.items {
+            s, err := kdc.FetchItemByName(ctx, item)
+            if err != nil {
+                el.AddRecoverable(clues.Wrap(err, "fetching item").
+                    WithClues(ctx).
+                    Label(fault.LabelForceNoBackupCreation))
+
+                continue
+            }
+
+            loadCount++
+            if loadCount%1000 == 0 {
+                logger.Ctx(ctx).Infow(
+                    "loading items from kopia",
+                    "loaded_items", loadCount)
+            }
+
             res <- s
         }
+
+        logger.Ctx(ctx).Infow(
+            "done loading items from kopia",
+            "loaded_items", loadCount)
     }()

     return res
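The rewrite above is the core of this change: a `kopiaDataCollection` now records only item names and defers the actual reads to `FetchItemByName` while `Items` is drained, instead of materializing every stream up front. A minimal sketch of the lazy-fetch pattern in isolation (toy types; kopia's `fs.Directory` and the fault bus are stubbed out, so the names here are illustrative only):

```go
package main

import (
	"context"
	"fmt"
)

// fetcher is a stand-in for whatever resolves a name to content on
// demand (in this PR, FetchItemByName backed by a kopia fs.Directory).
type fetcher func(ctx context.Context, name string) (string, error)

// lazyCollection keeps only item names; content is materialized one
// item at a time while the channel is drained, so nothing is read
// from the store until a consumer actually asks for it.
type lazyCollection struct {
	items []string
	fetch fetcher
}

func (c *lazyCollection) Items(ctx context.Context) <-chan string {
	res := make(chan string)

	go func() {
		defer close(res)

		for _, name := range c.items {
			s, err := c.fetch(ctx, name)
			if err != nil {
				// In the PR this is routed through the fault bus as a
				// recoverable error; here we simply skip the item.
				continue
			}

			res <- s
		}
	}()

	return res
}

func main() {
	c := &lazyCollection{
		items: []string{"a", "b"},
		fetch: func(_ context.Context, name string) (string, error) {
			return "content of " + name, nil
		},
	}

	for item := range c.Items(context.Background()) {
		fmt.Println(item)
	}
}
```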
@@ -165,15 +165,15 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
     {
         name: "SingleStream",
         uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
-            uuids[0]: assert.NoError,
+            uuids[0]: nil,
         },
         expectedLoaded: []loadedData{files[0]},
     },
     {
         name: "MultipleStreams",
         uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
-            uuids[0]: assert.NoError,
-            uuids[1]: assert.NoError,
+            uuids[0]: nil,
+            uuids[1]: nil,
         },
         expectedLoaded: files,
     },

@@ -181,7 +181,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
     name: "Some Not Found Errors",
     uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
         fileLookupErrName: assert.Error,
-        uuids[0]:          assert.NoError,
+        uuids[0]:          nil,
     },
     expectedLoaded: []loadedData{files[0]},
 },

@@ -189,7 +189,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
     name: "Some Not A File Errors",
     uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
         notFileErrName: assert.Error,
-        uuids[0]:       assert.NoError,
+        uuids[0]:       nil,
     },
     expectedLoaded: []loadedData{files[0]},
 },

@@ -197,7 +197,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
     name: "Some Open Errors",
     uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
         fileOpenErrName: assert.Error,
-        uuids[0]:        assert.NoError,
+        uuids[0]:        nil,
     },
     expectedLoaded: []loadedData{files[0]},
 },

@@ -217,20 +217,27 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
     ctx, flush := tester.NewContext(t)
     defer flush()

+    items := []string{}
+    errs := []assert.ErrorAssertionFunc{}
+
+    for uuid, err := range test.uuidsAndErrors {
+        if err != nil {
+            errs = append(errs, err)
+        }
+
+        items = append(items, uuid)
+    }
+
     c := kopiaDataCollection{
         dir:  getLayout(),
         path: nil,
+        items: items,
         expectedVersion: serializationVersion,
     }

-    for uuid, expectErr := range test.uuidsAndErrors {
-        err := c.addStream(ctx, uuid)
-        expectErr(t, err, "adding stream to collection", clues.ToCore(err))
-    }
-
     var (
         found []loadedData
-        bus   = fault.New(true)
+        bus   = fault.New(false)
     )

     for returnedStream := range c.Items(ctx, bus) {

@@ -256,7 +263,12 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
         f.size = ss.Size()
     }

-    assert.Empty(t, bus.Recovered(), "expected no recoverable errors")
+    // We expect the items to be fetched in the order they are
+    // in the struct or the errors will not line up
+    for i, err := range bus.Recovered() {
+        assert.True(t, errs[i](t, err), "expected error", clues.ToCore(err))
+    }
+
     assert.NoError(t, bus.Failure(), "expected no hard failures")

     assert.ElementsMatch(t, test.expectedLoaded, found, "loaded items")
@@ -394,7 +394,6 @@ func loadDirsAndItems(
     var (
         el  = bus.Local()
         res = make([]data.RestoreCollection, 0, len(toLoad))
-        loadCount = 0
     )

     for _, col := range toLoad {

@@ -426,6 +425,7 @@ func loadDirsAndItems(
     dc := &kopiaDataCollection{
         path: col.restorePath,
         dir:  dir,
+        items: dirItems.items,
         counter: bcounter,
         expectedVersion: serializationVersion,
     }

@@ -437,34 +437,8 @@ func loadDirsAndItems(

             continue
         }

-        for _, item := range dirItems.items {
-            if el.Failure() != nil {
-                return nil, el.Failure()
-            }
-
-            err := dc.addStream(ictx, item)
-            if err != nil {
-                el.AddRecoverable(clues.Wrap(err, "loading item").
-                    WithClues(ictx).
-                    Label(fault.LabelForceNoBackupCreation))
-
-                continue
-            }
-
-            loadCount++
-            if loadCount%1000 == 0 {
-                logger.Ctx(ctx).Infow(
-                    "loading items from kopia",
-                    "loaded_items", loadCount)
-            }
-        }
     }
 }

-logger.Ctx(ctx).Infow(
-    "done loading items from kopia",
-    "loaded_items", loadCount)
-
 return res, el.Failure()
 }
@@ -843,16 +843,28 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {

     ic := i64counter{}

-    _, err = suite.w.ProduceRestoreCollections(
+    dcs, err := suite.w.ProduceRestoreCollections(
         suite.ctx,
         string(stats.SnapshotID),
         toRestorePaths(t, failedPath),
         &ic,
         fault.New(true))
+    assert.NoError(t, err, "error producing restore collections")
+
+    require.Len(t, dcs, 1, "number of restore collections")
+
+    errs := fault.New(true)
+    items := dcs[0].Items(suite.ctx, errs)
+
+    // Get all the items from channel
+    //nolint:revive
+    for range items {
+    }
+
     // Files that had an error shouldn't make a dir entry in kopia. If they do we
     // may run into kopia-assisted incrementals issues because only mod time and
     // not file size is checked for StreamingFiles.
-    assert.ErrorIs(t, err, data.ErrNotFound, "errored file is restorable", clues.ToCore(err))
+    assert.ErrorIs(t, errs.Failure(), data.ErrNotFound, "errored file is restorable", clues.ToCore(err))
 }

 type backedupFile struct {

@@ -1223,13 +1235,25 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {

     ic := i64counter{}

-    _, err = suite.w.ProduceRestoreCollections(
+    dcs, err := suite.w.ProduceRestoreCollections(
         suite.ctx,
         string(stats.SnapshotID),
         toRestorePaths(t, suite.files[suite.testPath1.String()][0].itemPath),
         &ic,
         fault.New(true))
-    test.restoreCheck(t, err, clues.ToCore(err))
+    assert.NoError(t, err, "errors producing collection", clues.ToCore(err))
+    require.Len(t, dcs, 1, "unexpected number of restore collections")
+
+    errs := fault.New(true)
+    items := dcs[0].Items(suite.ctx, errs)
+
+    // Get all the items from channel
+    //nolint:revive
+    for range items {
+    }
+
+    test.restoreCheck(t, errs.Failure(), errs)
     })
 }
 }

@@ -1252,6 +1276,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
     inputPaths []path.Path
     expectedCollections int
     expectedErr assert.ErrorAssertionFunc
+    expectedCollectionErr assert.ErrorAssertionFunc
 }{
     {
         name: "SingleItem",

@@ -1260,6 +1285,7 @@
     },
     expectedCollections: 1,
     expectedErr: assert.NoError,
+    expectedCollectionErr: assert.NoError,
 },
 {
     name: "MultipleItemsSameCollection",

@@ -1269,6 +1295,7 @@
     },
     expectedCollections: 1,
     expectedErr: assert.NoError,
+    expectedCollectionErr: assert.NoError,
 },
 {
     name: "MultipleItemsDifferentCollections",

@@ -1278,6 +1305,7 @@
     },
     expectedCollections: 2,
     expectedErr: assert.NoError,
+    expectedCollectionErr: assert.NoError,
 },
 {
     name: "TargetNotAFile",

@@ -1288,6 +1316,7 @@
     },
     expectedCollections: 0,
     expectedErr: assert.Error,
+    expectedCollectionErr: assert.NoError,
 },
 {
     name: "NonExistentFile",

@@ -1297,7 +1326,8 @@
         suite.files[suite.testPath2.String()][0].itemPath,
     },
     expectedCollections: 0,
-    expectedErr: assert.Error,
+    expectedErr: assert.NoError,
+    expectedCollectionErr: assert.Error, // folder for doesntExist does not exist
 },
 }

@@ -1330,12 +1360,28 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
         toRestorePaths(t, test.inputPaths...),
         &ic,
         fault.New(true))
-    test.expectedErr(t, err, clues.ToCore(err))
+    test.expectedCollectionErr(t, err, clues.ToCore(err), "producing collections")

     if err != nil {
         return
     }

+    errs := fault.New(true)
+
+    for _, dc := range result {
+        // Get all the items from channel
+        items := dc.Items(suite.ctx, errs)
+        //nolint:revive
+        for range items {
+        }
+    }
+
+    test.expectedErr(t, errs.Failure(), errs.Failure(), "getting items")
+
+    if errs.Failure() != nil {
+        return
+    }
+
     assert.Len(t, result, test.expectedCollections)
     assert.Less(t, int64(0), ic.i)
     testForFiles(t, ctx, expected, result)

@@ -1456,7 +1502,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Path
     require.NoError(t, err, clues.ToCore(err))

     assert.Len(t, result, test.expectedCollections)
-    assert.Less(t, int64(0), ic.i)
     testForFiles(t, ctx, expected, result)
 })
 }
@@ -324,7 +324,7 @@ func (op *BackupOperation) do(
         }
     }

-    cs, ssmb, err := produceBackupDataCollections(
+    cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections(
         ctx,
         op.bp,
         op.ResourceOwner,

@@ -348,7 +348,7 @@ func (op *BackupOperation) do(
         cs,
         ssmb,
         backupID,
-        op.incremental && canUseMetaData,
+        op.incremental && canUseMetaData && canUsePreviousBackup,
         op.Errors)
     if err != nil {
         return nil, clues.Wrap(err, "persisting collection backups")

@@ -406,7 +406,7 @@ func produceBackupDataCollections(
     lastBackupVersion int,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, prefixmatcher.StringSetReader, error) {
+) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
     complete := observe.MessageWithCompletion(ctx, "Discovering items to backup")
     defer func() {
         complete <- struct{}{}
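The operative change in `do` is the extra conjunct on the merge condition: an incremental merge against the previous backup now also requires the producer's consent. A compact illustration of the gate (a standalone sketch wrapping the same boolean logic, not the repository's code):

```go
package main

import "fmt"

// shouldMerge mirrors the condition used when persisting collections:
// incremental mode must be requested, the stored metadata must be
// readable, and the producer must confirm the previous backup is safe
// to build on (the new canUsePreviousBackup flag).
func shouldMerge(incremental, canUseMetaData, canUsePreviousBackup bool) bool {
	return incremental && canUseMetaData && canUsePreviousBackup
}

func main() {
	// If any leg is false — for example, metadata deserialized but the
	// producer could not honor it — the run degrades to a full backup.
	fmt.Println(shouldMerge(true, true, true))  // true: incremental merge
	fmt.Println(shouldMerge(true, true, false)) // false: fall back to full
}
```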
@@ -26,7 +26,7 @@ type (
     lastBackupVersion int,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, prefixmatcher.StringSetReader, error)
+) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error)
 IsBackupRunnable(ctx context.Context, service path.ServiceType, resourceOwner string) (bool, error)

 Wait() *data.CollectionStats