Expand interfaces for components used during backup (#1731)

## Description

Expand the interfaces for `GraphConnector.DataCollections` and `kopia.Wrapper.BackupCollections` to include parameters that will be needed for incremental backups. This patch only expands the interfaces; it does not add any new functionality, and the new parameters are currently ignored.

In the future, passing `nil` for any of the new parameters should preserve Corso's current "full backup" behavior, while passing values should enable delta-token-based incremental backups (assuming all the data required for an incremental backup is available).
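To make the intended calling convention concrete, here is a minimal, self-contained sketch. Every type below is a simplified stand-in (an assumption for illustration only) rather than the real `GraphConnector`, `kopia.Wrapper`, `data.Collection`, `selectors.Selector`, or `snapshot.Manifest`, and the flow reflects only the behavior described above.

```go
package main

import "context"

// Stand-ins for the real Corso and kopia types touched by this PR; names are
// illustrative only, not imports of the actual packages.
type (
	Collection  interface{}
	Selector    struct{}
	Manifest    struct{}
	ServiceType int

	GraphConnector struct{}
	KopiaWrapper   struct{}
)

// Loosely mirrors the expanded GraphConnector.DataCollections signature: a nil
// metadataCols keeps today's full-backup behavior.
func (gc *GraphConnector) DataCollections(
	ctx context.Context,
	sels Selector,
	metadataCols []Collection,
) ([]Collection, error) {
	return nil, nil // metadataCols is currently ignored, per this PR
}

// Loosely mirrors the expanded kopia Wrapper.BackupCollections signature
// (return values simplified here): a nil previousSnapshots results in a
// complete backup of all data.
func (w KopiaWrapper) BackupCollections(
	ctx context.Context,
	previousSnapshots []*Manifest,
	collections []Collection,
	service ServiceType,
) error {
	return nil // previousSnapshots is currently ignored, per this PR
}

func main() {
	ctx := context.Background()
	gc := &GraphConnector{}
	w := KopiaWrapper{}

	// Today every caller passes nil, so behavior is unchanged.
	cols, _ := gc.DataCollections(ctx, Selector{}, nil)
	_ = w.BackupCollections(ctx, nil, cols, 0)

	// Future (assumption): metadata collections and base snapshots from a
	// prior backup would replace the nils to enable incremental backups.
}
```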

## Type of change

- [ ] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Test
- [ ] 💻 CI/Deployment
- [x] 🐹 Trivial/Minor

## Issue(s)

* closes #1700 

## Test Plan

- [ ] 💪 Manual
- [x]  Unit test
- [ ] 💚 E2E
Commit 19f7de59f4 (parent e15d86e82f), authored by ashmrtn, committed via GitHub on 2022-12-08 10:40:10 -08:00.

6 changed files with 55 additions and 13 deletions.

```diff
@@ -24,8 +24,16 @@ import (
 // Data Collections
 // ---------------------------------------------------------------------------
-// DataCollections utility function to launch backup operations for exchange and onedrive
-func (gc *GraphConnector) DataCollections(ctx context.Context, sels selectors.Selector) ([]data.Collection, error) {
+// DataCollections utility function to launch backup operations for exchange and
+// onedrive. metadataCols contains any collections with metadata files that may
+// be useful for the current backup. Metadata can include things like delta
+// tokens or the previous backup's folder hierarchy. The absence of metadataCols
+// results in all data being pulled.
+func (gc *GraphConnector) DataCollections(
+    ctx context.Context,
+    sels selectors.Selector,
+    metadataCols []data.Collection,
+) ([]data.Collection, error) {
     ctx, end := D.Span(ctx, "gc:dataCollections", D.Index("service", sels.Service.String()))
     defer end()
```
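As a rough illustration of the kind of content metadataCols might eventually carry (delta tokens, the previous backup's folder hierarchy), here is a hedged sketch; `metadataItem` and `metadataCollection` are hypothetical stand-ins and do not implement Corso's real `data.Collection` interface.

```go
package main

import "fmt"

// metadataItem pairs a metadata file name with its serialized contents
// (assumption: metadata is persisted as small named files).
type metadataItem struct {
	name string
	body []byte
}

// metadataCollection groups metadata items for a single service/category,
// standing in for one entry of the metadataCols slice.
type metadataCollection struct {
	items []metadataItem
}

func main() {
	// Example contents mirroring the doc comment on DataCollections: a delta
	// token and the previous backup's folder hierarchy (values are fake).
	mc := metadataCollection{
		items: []metadataItem{
			{name: "delta_token", body: []byte("example-delta-token")},
			{name: "folder_hierarchy", body: []byte(`{"Inbox":"folder-id-123"}`)},
		},
	}

	for _, it := range mc.items {
		fmt.Printf("%s: %d bytes\n", it.name, len(it.body))
	}
}
```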

```diff
@@ -157,7 +157,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestInvalidUserForDataColl
     for _, test := range tests {
         suite.T().Run(test.name, func(t *testing.T) {
-            collections, err := connector.DataCollections(ctx, test.getSelector(t))
+            collections, err := connector.DataCollections(ctx, test.getSelector(t), nil)
             assert.Error(t, err)
             assert.Empty(t, collections)
         })
@@ -542,6 +542,6 @@ func (suite *ConnectorCreateSharePointCollectionIntegrationSuite) TestCreateShar
         selectors.PrefixMatch(),
     ))
-    _, err := gc.DataCollections(ctx, sel.Selector)
+    _, err := gc.DataCollections(ctx, sel.Selector, nil)
     require.NoError(t, err)
 }
```

```diff
@@ -387,7 +387,7 @@ func runRestoreBackupTest(
     t.Logf("Selective backup of %s\n", backupSel)
     start = time.Now()
-    dcs, err := backupGC.DataCollections(ctx, backupSel)
+    dcs, err := backupGC.DataCollections(ctx, backupSel, nil)
     require.NoError(t, err)
     t.Logf("Backup enumeration complete in %v\n", time.Since(start))
@@ -855,7 +855,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
     backupSel := backupSelectorForExpected(t, test.service, expectedDests)
     t.Log("Selective backup of", backupSel)
-    dcs, err := backupGC.DataCollections(ctx, backupSel)
+    dcs, err := backupGC.DataCollections(ctx, backupSel, nil)
     require.NoError(t, err)
     t.Log("Backup enumeration complete")
```

```diff
@@ -493,8 +493,17 @@ func inflateDirTree(
     return res, ownerCats, nil
 }
 
+// BackupCollections takes a set of collections and creates a kopia snapshot
+// with the data that they contain. previousSnapshots is used for incremental
+// backups and should represent the base snapshot from which metadata is sourced
+// from as well as any incomplete snapshot checkpoints that may contain more
+// recent data than the base snapshot. The absence of previousSnapshots causes a
+// complete backup of all data.
+//
+// TODO(ashmrtn): Use previousSnapshots parameter.
 func (w Wrapper) BackupCollections(
     ctx context.Context,
+    previousSnapshots []*snapshot.Manifest,
     collections []data.Collection,
     service path.ServiceType,
 ) (*BackupStats, *details.Details, error) {
```
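The doc comment implies an ordering for previousSnapshots: the base snapshot first, followed by any incomplete checkpoints with potentially newer data. Below is a small sketch of assembling that slice; `previousSnapshotsFor` is a hypothetical helper (the only real type used is kopia's `snapshot.Manifest`), and the ordering is an assumption drawn from the comment rather than behavior implemented in this PR.

```go
package main

import (
	"fmt"

	"github.com/kopia/kopia/snapshot"
)

// previousSnapshotsFor assembles the slice described by the doc comment on
// BackupCollections: the base snapshot that metadata is sourced from, plus any
// incomplete checkpoints that may contain newer data. Both the helper and the
// ordering are illustrative assumptions.
func previousSnapshotsFor(
	base *snapshot.Manifest,
	checkpoints []*snapshot.Manifest,
) []*snapshot.Manifest {
	if base == nil {
		// No base snapshot: return nil so callers get a complete backup.
		return nil
	}

	prev := make([]*snapshot.Manifest, 0, len(checkpoints)+1)
	prev = append(prev, base)
	prev = append(prev, checkpoints...)

	return prev
}

func main() {
	// Without a base snapshot, today's full-backup behavior is preserved.
	fmt.Println(previousSnapshotsFor(nil, nil) == nil) // prints: true
}
```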

```diff
@@ -882,7 +882,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
     for _, test := range table {
         suite.T().Run(test.name, func(t *testing.T) {
-            stats, deets, err := suite.w.BackupCollections(suite.ctx, collections, path.ExchangeService)
+            stats, deets, err := suite.w.BackupCollections(
+                suite.ctx,
+                nil,
+                collections,
+                path.ExchangeService,
+            )
             assert.NoError(t, err)
             assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files")
@@ -933,7 +938,12 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
     fp2, err := suite.testPath2.Append(dc2.Names[0], true)
     require.NoError(t, err)
-    stats, deets, err := w.BackupCollections(ctx, []data.Collection{dc1, dc2}, path.ExchangeService)
+    stats, deets, err := w.BackupCollections(
+        ctx,
+        nil,
+        []data.Collection{dc1, dc2},
+        path.ExchangeService,
+    )
     require.NoError(t, err)
     assert.Equal(t, path.ExchangeService.String(), deets.Tags[model.ServiceTag])
@@ -999,7 +1009,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
         },
     }
-    stats, deets, err := suite.w.BackupCollections(suite.ctx, collections, path.ExchangeService)
+    stats, deets, err := suite.w.BackupCollections(
+        suite.ctx,
+        nil,
+        collections,
+        path.ExchangeService,
+    )
     require.NoError(t, err)
     assert.Equal(t, 0, stats.ErrorCount)
@@ -1038,7 +1053,12 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
             ctx, flush := tester.NewContext()
             defer flush()
-            s, d, err := suite.w.BackupCollections(ctx, test.collections, path.UnknownService)
+            s, d, err := suite.w.BackupCollections(
+                ctx,
+                nil,
+                test.collections,
+                path.UnknownService,
+            )
             require.NoError(t, err)
             assert.Equal(t, BackupStats{}, *s)
@@ -1184,7 +1204,12 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
         collections = append(collections, collection)
     }
-    stats, deets, err := suite.w.BackupCollections(suite.ctx, collections, path.ExchangeService)
+    stats, deets, err := suite.w.BackupCollections(
+        suite.ctx,
+        nil,
+        collections,
+        path.ExchangeService,
+    )
     require.NoError(t, err)
     require.Equal(t, stats.ErrorCount, 0)
     require.Equal(t, stats.TotalFileCount, expectedFiles)
```

```diff
@@ -147,7 +147,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
     defer closer()
     defer close(discoverCh)
-    cs, err := gc.DataCollections(ctx, op.Selectors)
+    cs, err := gc.DataCollections(ctx, op.Selectors, nil)
     if err != nil {
         err = errors.Wrap(err, "retrieving service data")
         opStats.readErr = err
@@ -164,7 +164,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
     defer close(backupCh)
     // hand the results to the consumer
-    opStats.k, backupDetails, err = op.kopia.BackupCollections(ctx, cs, op.Selectors.PathService())
+    opStats.k, backupDetails, err = op.kopia.BackupCollections(ctx, nil, cs, op.Selectors.PathService())
     if err != nil {
         err = errors.Wrap(err, "backing up service data")
         opStats.writeErr = err
```