Compare commits

...

1 commit

Author: ryanfkeepers
SHA1: ab0028bb0f
Message: setting aside temporarily
Date: 2023-07-10 11:07:39 -06:00
10 changed files with 958 additions and 747 deletions

View File

@@ -2,6 +2,7 @@ package kopia

 import (
 	"context"
+	"fmt"
 	"sort"

 	"github.com/alcionai/clues"
@@ -287,6 +288,11 @@ func (b *baseFinder) getBase(
 		return nil, nil, nil, clues.Wrap(err, "getting snapshots")
 	}

+	fmt.Printf("\n-----\nmetas %+v\n-----\n", len(metas))
+	for _, m := range metas {
+		fmt.Println(m.ID, m.Labels)
+	}
+
 	// No snapshots means no backups so we can just exit here.
 	if len(metas) == 0 {
 		return nil, nil, nil, nil

View File

@@ -428,7 +428,7 @@ func runRestore(
 	start := time.Now()

 	restoreCtrl := newController(ctx, t, sci.Resource, path.ExchangeService)
-	restoreSel := getSelectorWith(t, sci.Service, sci.ResourceOwners, true)
+	restoreSel := selTD.MakeSelector(t, sci.Service, sci.ResourceOwners, true)
 	deets, err := restoreCtrl.ConsumeRestoreCollections(
 		ctx,
 		backupVersion,
@@ -997,7 +997,7 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
 			ctx, flush := tester.NewContext(t)
 			defer flush()

-			restoreSel := getSelectorWith(t, test.service, []string{suite.user}, true)
+			restoreSel := selTD.MakeSelector(t, test.service, []string{suite.user}, true)
 			expectedDests := make([]destAndCats, 0, len(test.collections))
 			allItems := 0
 			allExpectedData := map[string]map[string][]byte{}

View File

@@ -1157,40 +1157,6 @@ func backupSelectorForExpected(
 	return selectors.Selector{}
 }

-func getSelectorWith(
-	t *testing.T,
-	service path.ServiceType,
-	resourceOwners []string,
-	forRestore bool,
-) selectors.Selector {
-	switch service {
-	case path.ExchangeService:
-		if forRestore {
-			return selectors.NewExchangeRestore(resourceOwners).Selector
-		}
-
-		return selectors.NewExchangeBackup(resourceOwners).Selector
-
-	case path.OneDriveService:
-		if forRestore {
-			return selectors.NewOneDriveRestore(resourceOwners).Selector
-		}
-
-		return selectors.NewOneDriveBackup(resourceOwners).Selector
-
-	case path.SharePointService:
-		if forRestore {
-			return selectors.NewSharePointRestore(resourceOwners).Selector
-		}
-
-		return selectors.NewSharePointBackup(resourceOwners).Selector
-
-	default:
-		require.FailNow(t, "unknown path service")
-		return selectors.Selector{}
-	}
-}
-
 func newController(
 	ctx context.Context,
 	t *testing.T,

View File

@@ -0,0 +1,462 @@
package test_test
import (
"context"
"fmt"
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/events"
evmock "github.com/alcionai/corso/src/internal/events/mock"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/m365"
"github.com/alcionai/corso/src/internal/m365/resource"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/internal/operations"
"github.com/alcionai/corso/src/internal/streamstore"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/storage"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
"github.com/alcionai/corso/src/pkg/store"
)
// ---------------------------------------------------------------------------
// singleton
// ---------------------------------------------------------------------------
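// backupInstance ties one BackupOperation to the dependencies it was built
// with, and chains any incremental runs made on top of it. A typical chain,
// as the suites below use it (sketch):
//
//	bi := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
//	bi.runAndCheckBackup(t, ctx, mb, false)
//	inc := bi.runAndCheckIncrementalBackup(t, ctx, evmock.NewBus())
//	defer bi.close(t, ctx) // also closes inc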
type backupInstance struct {
obo *operations.BackupOperation
bod *backupOpDependencies
// forms a linked list of incremental backups
incremental *backupInstance
}
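// close tears down the incremental chain first, then this instance's own
// dependencies; suites defer it from teardown.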
func (bi *backupInstance) close(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
) {
if bi.incremental != nil {
bi.incremental.close(t, ctx)
}
bi.bod.close(t, ctx)
}
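// runAndCheckBackup executes the operation, requires that it produced a
// backup ID, and applies the standard result assertions (see checkBackup).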
func (bi *backupInstance) runAndCheckBackup(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
mb *evmock.Bus,
acceptNoData bool,
) {
err := bi.obo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, bi.obo.Results, "the backup had non-zero results")
require.NotEmpty(t, bi.obo.Results.BackupID, "the backup generated an ID")
checkBackup(t, *bi.obo, mb, acceptNoData)
}
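// runAndCheckIncrementalBackup clones this instance's dependencies, renews
// the controller so no state leaks between runs, executes a follow-up backup,
// and appends it to the incremental chain.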
func (bi *backupInstance) runAndCheckIncrementalBackup(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
mb *evmock.Bus,
) *backupInstance {
// bi.incremental = prepNewTestBackupOp(t, ctx, mb, bi.bod.sel, bi.obo.Options, bi.obo.BackupVersion)
incremental := &backupInstance{
bod: &backupOpDependencies{},
}
// copy the old bod connection references
*incremental.bod = *bi.bod
// generate a new controller to avoid statefulness
incremental.bod.renewController(t, ctx)
incremental.obo = newTestBackupOp(
t,
ctx,
incremental.bod,
mb,
bi.obo.Options)
	bi.incremental = incremental
	err := bi.incremental.obo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, bi.incremental.obo.Results, "the incremental backup had non-zero results")
require.NotEmpty(t, bi.incremental.obo.Results.BackupID, "the incremental backup generated an ID")
return bi.incremental
}
func checkBackup(
t *testing.T,
obo operations.BackupOperation,
mb *evmock.Bus,
acceptNoData bool,
) {
expectStatus := []operations.OpStatus{operations.Completed}
if acceptNoData {
expectStatus = append(expectStatus, operations.NoData)
}
require.Contains(
t,
expectStatus,
obo.Status,
"backup doesn't match expectation, wanted any of %v, got %s",
expectStatus,
obo.Status)
require.Less(t, 0, obo.Results.ItemsWritten)
assert.Less(t, 0, obo.Results.ItemsRead, "count of items read")
assert.Less(t, int64(0), obo.Results.BytesRead, "bytes read")
assert.Less(t, int64(0), obo.Results.BytesUploaded, "bytes uploaded")
assert.Equal(t, 1, obo.Results.ResourceOwners, "count of resource owners")
assert.NoErrorf(
t,
obo.Errors.Failure(),
"incremental non-recoverable error %+v",
clues.ToCore(obo.Errors.Failure()))
assert.Empty(t, obo.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")
assert.Equal(t, 1, mb.TimesCalled[events.BackupEnd], "backup-end events")
assert.Equal(t,
mb.CalledWith[events.BackupStart][0][events.BackupID],
obo.Results.BackupID, "incremental pre-run backupID event")
}
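// checkIncrementalBackup asserts the error and event expectations for an
// incremental run; the item-count assertions stay commented out below until
// they can be made reliable (see the FIXME).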
func checkIncrementalBackup(
t *testing.T,
obo operations.BackupOperation,
mb *evmock.Bus,
) {
assert.NoError(t, obo.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(obo.Errors.Failure()))
assert.Empty(t, obo.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 1, mb.TimesCalled[events.BackupEnd], "incremental backup-end events")
// FIXME: commented tests are flaky due to delta calls retaining data that is
// out of scope of the test data.
// we need to find a better way to make isolated assertions here.
// The addition of the deeTD package gives us enough coverage to comment
// out the tests for now and look to their improvement later.
// do some additional checks to ensure the incremental dealt with fewer items.
// +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
// if !toggles.DisableDelta {
// assert.Equal(t, test.deltaItemsRead+4, incBO.Results.ItemsRead, "incremental items read")
// assert.Equal(t, test.deltaItemsWritten+4, incBO.Results.ItemsWritten, "incremental items written")
// } else {
// assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
// assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
// }
// assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
assert.Equal(t,
mb.CalledWith[events.BackupStart][0][events.BackupID],
obo.Results.BackupID, "incremental pre-run backupID event")
}
// ---------------------------------------------------------------------------
// initialization and dependencies
// ---------------------------------------------------------------------------
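// backupOpDependencies bundles the clients and stores that a chain of backup
// operations shares: the kopia wrappers, the m365 controller, storage, and
// the selector the operations run against.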
type backupOpDependencies struct {
acct account.Account
ctrl *m365.Controller
kms *kopia.ModelStore
kw *kopia.Wrapper
sel selectors.Selector
sss streamstore.Streamer
st storage.Storage
sw *store.Wrapper
closer func()
}
// prepNewTestBackupOp generates all clients required to run a backup operation,
// returning both a backup operation created with those clients, as well as
// the clients themselves.
func prepNewTestBackupOp(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
bus events.Eventer,
sel selectors.Selector,
opts control.Options,
backupVersion int,
) *backupInstance {
bod := &backupOpDependencies{
acct: tconfig.NewM365Account(t),
st: storeTD.NewPrefixedS3Storage(t),
}
k := kopia.NewConn(bod.st)
err := k.Initialize(ctx, repository.Options{})
require.NoError(t, err, clues.ToCore(err))
defer func() {
if err != nil {
bod.close(t, ctx)
t.FailNow()
}
}()
// kopiaRef comes with a count of 1 and Wrapper bumps it again
// so we're safe to close here.
bod.closer = func() {
err := k.Close(ctx)
assert.NoErrorf(t, err, "k close: %+v", clues.ToCore(err))
}
bod.kw, err = kopia.NewWrapper(k)
require.NoError(t, err, clues.ToCore(err))
bod.kms, err = kopia.NewModelStore(k)
require.NoError(t, err, clues.ToCore(err))
bod.sw = store.NewKopiaStore(bod.kms)
connectorResource := resource.Users
if sel.Service == selectors.ServiceSharePoint {
connectorResource = resource.Sites
}
bod.ctrl, bod.sel = ControllerWithSelector(
t,
ctx,
bod.acct,
connectorResource,
sel,
nil,
bod.close)
obo := newTestBackupOp(
t,
ctx,
bod,
bus,
opts)
bod.sss = streamstore.NewStreamer(
bod.kw,
bod.acct.ID(),
bod.sel.PathService())
return &backupInstance{
obo: obo,
bod: bod,
}
}
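// close releases the kopia connection, wrapper, and model store acquired in
// prepNewTestBackupOp.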
func (bod *backupOpDependencies) close(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
) {
bod.closer()
if bod.kw != nil {
err := bod.kw.Close(ctx)
assert.NoErrorf(t, err, "kw close: %+v", clues.ToCore(err))
}
if bod.kms != nil {
err := bod.kms.Close(ctx)
assert.NoErrorf(t, err, "kms close: %+v", clues.ToCore(err))
}
}
// generates a new controller, and replaces bod.ctrl with that instance.
// useful for clearing controller state between runs.
func (bod *backupOpDependencies) renewController(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
) {
rc := resource.Users
if bod.sel.PathService() == path.SharePointService {
rc = resource.Sites
}
newCtrl, err := m365.NewController(
ctx,
bod.acct,
rc,
bod.sel.PathService(),
control.Defaults())
require.NoError(t, err, clues.ToCore(err))
bod.ctrl = newCtrl
}
// newTestBackupOp accepts the clients required to compose a backup operation, plus
// any other metadata, and uses them to generate a new backup operation. This
// allows backup chains to utilize the same temp directory and configuration
// details.
func newTestBackupOp(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
bod *backupOpDependencies,
bus events.Eventer,
opts control.Options,
) *operations.BackupOperation {
bod.ctrl.IDNameLookup = idname.NewCache(map[string]string{bod.sel.ID(): bod.sel.Name()})
bo, err := operations.NewBackupOperation(
ctx,
opts,
bod.kw,
bod.sw,
bod.ctrl,
bod.acct,
bod.sel,
bod.sel,
bus)
if !assert.NoError(t, err, clues.ToCore(err)) {
bod.close(t, ctx)
t.FailNow()
}
return &bo
}
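// checkBackupIsInManifests finds the current merge bases for each category
// and asserts that one of them carries this backup's ID tag.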
func checkBackupIsInManifests(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
bod *backupOpDependencies,
bo *operations.BackupOperation,
sel selectors.Selector,
resourceOwner string,
categories ...path.CategoryType,
) {
for _, category := range categories {
t.Run("backup_in_manifests_"+category.String(), func(t *testing.T) {
var (
reasons = []kopia.Reason{
{
ResourceOwner: resourceOwner,
Service: sel.PathService(),
Category: category,
},
}
tags = map[string]string{kopia.TagBackupCategory: ""}
found bool
)
bf, err := bod.kw.NewBaseFinder(bod.sw)
require.NoError(t, err, clues.ToCore(err))
fmt.Printf("\n-----\nR %+v\nT %+v\n-----\n", reasons, tags)
mans := bf.FindBases(ctx, reasons, tags)
mmb := mans.MergeBases()
require.NotEmpty(t, mmb, "should find at least one merge base")
t.Log("Backup IDs from merge bases:")
for _, man := range mmb {
bID, ok := man.GetTag(kopia.TagBackupID)
if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) {
continue
}
t.Log("-", bID)
if bID == string(bo.Results.BackupID) {
found = true
break
}
}
assert.True(t, found, "backup %q retrieved by previous snapshot manifest", bo.Results.BackupID)
})
}
}
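// checkMetadataFilesExist restores the metadata paths recorded for the backup
// and asserts that every expected file is present and non-empty.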
func checkMetadataFilesExist(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
backupID model.StableID,
bod *backupOpDependencies,
tenant, resourceOwner string,
service path.ServiceType,
filesByCat map[path.CategoryType][]string,
) {
for category, files := range filesByCat {
t.Run("metadata_files_exist_"+category.String(), func(t *testing.T) {
bup := &backup.Backup{}
err := bod.kms.Get(ctx, model.BackupSchema, backupID, bup)
if !assert.NoError(t, err, clues.ToCore(err)) {
return
}
paths := []path.RestorePaths{}
pathsByRef := map[string][]string{}
for _, fName := range files {
p, err := path.Builder{}.
Append(fName).
ToServiceCategoryMetadataPath(tenant, resourceOwner, service, category, true)
if !assert.NoError(t, err, "bad metadata path", clues.ToCore(err)) {
continue
}
dir, err := p.Dir()
if !assert.NoError(t, err, "parent path", clues.ToCore(err)) {
continue
}
paths = append(
paths,
path.RestorePaths{StoragePath: p, RestorePath: dir})
pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName)
}
cols, err := bod.kw.ProduceRestoreCollections(
ctx,
bup.SnapshotID,
paths,
nil,
fault.New(true))
assert.NoError(t, err, clues.ToCore(err))
for _, col := range cols {
itemNames := []string{}
for item := range col.Items(ctx, fault.New(true)) {
assert.Implements(t, (*data.StreamSize)(nil), item)
s := item.(data.StreamSize)
assert.Greaterf(
t,
s.Size(),
int64(0),
"empty metadata file: %s/%s",
col.FullPath(),
item.UUID(),
)
itemNames = append(itemNames, item.UUID())
}
assert.ElementsMatchf(
t,
pathsByRef[col.FullPath().ShortRef()],
itemNames,
"collection %s missing expected files",
col.FullPath(),
)
}
})
}
}

View File

@@ -8,7 +8,6 @@ import (
 	"github.com/alcionai/clues"
 	"github.com/microsoftgraph/msgraph-sdk-go/users"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 	"golang.org/x/exp/maps"
@@ -16,7 +15,6 @@ import (
 	"github.com/alcionai/corso/src/internal/common/dttm"
 	inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
 	"github.com/alcionai/corso/src/internal/common/ptr"
-	"github.com/alcionai/corso/src/internal/events"
 	evmock "github.com/alcionai/corso/src/internal/events/mock"
 	"github.com/alcionai/corso/src/internal/m365/exchange"
 	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
@@ -34,64 +32,81 @@ import (
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
 )

-type ExchangeBackupIntgSuite struct {
+type ExchangeIntgSuite struct {
 	tester.Suite
 	its intgTesterSetup
+	// the goal of backupInstances is to run a single backup at the start of
+	// the suite, and re-use that backup throughout the rest of the suite.
+	bi *backupInstance
 }

-func TestExchangeBackupIntgSuite(t *testing.T) {
-	suite.Run(t, &ExchangeBackupIntgSuite{
+func TestExchangeIntgSuite(t *testing.T) {
+	suite.Run(t, &ExchangeIntgSuite{
 		Suite: tester.NewIntegrationSuite(
 			t,
 			[][]string{tconfig.M365AcctCredEnvs, storeTD.AWSStorageCredEnvs}),
 	})
 }

-func (suite *ExchangeBackupIntgSuite) SetupSuite() {
-	suite.its = newIntegrationTesterSetup(suite.T())
+func (suite *ExchangeIntgSuite) SetupSuite() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	suite.its = newIntegrationTesterSetup(t)
+
+	sel := selectors.NewExchangeBackup([]string{suite.its.userID})
+	sel.Include(
+		sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()),
+		sel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()),
+		sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
+	sel.DiscreteOwner = suite.its.userID
+
+	var (
+		mb   = evmock.NewBus()
+		opts = control.Defaults()
+	)
+
+	suite.bi = prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
+	suite.bi.runAndCheckBackup(t, ctx, mb, false)
+}
+
+func (suite *ExchangeIntgSuite) TeardownSuite() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	if suite.bi != nil {
+		suite.bi.close(t, ctx)
+	}
 }

 // TestBackup_Run ensures that Integration Testing works
 // for the following scopes: Contacts, Events, and Mail
-func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
+func (suite *ExchangeIntgSuite) TestBackup_Run_exchange() {
 	tests := []struct {
 		name          string
-		selector      func() *selectors.ExchangeBackup
 		category      path.CategoryType
 		metadataFiles []string
 	}{
 		{
 			name:          "Mail",
-			selector: func() *selectors.ExchangeBackup {
-				sel := selectors.NewExchangeBackup([]string{suite.its.userID})
-				sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
-				sel.DiscreteOwner = suite.its.userID
-				return sel
-			},
 			category:      path.EmailCategory,
 			metadataFiles: exchange.MetadataFileNames(path.EmailCategory),
 		},
-		{
-			name: "Contacts",
-			selector: func() *selectors.ExchangeBackup {
-				sel := selectors.NewExchangeBackup([]string{suite.its.userID})
-				sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()))
-				return sel
-			},
-			category:      path.ContactsCategory,
-			metadataFiles: exchange.MetadataFileNames(path.ContactsCategory),
-		},
-		{
-			name: "Calendar Events",
-			selector: func() *selectors.ExchangeBackup {
-				sel := selectors.NewExchangeBackup([]string{suite.its.userID})
-				sel.Include(sel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()))
-				return sel
-			},
-			category:      path.EventsCategory,
-			metadataFiles: exchange.MetadataFileNames(path.EventsCategory),
-		},
+		// {
+		// 	name:          "Contacts",
+		// 	category:      path.ContactsCategory,
+		// 	metadataFiles: exchange.MetadataFileNames(path.ContactsCategory),
+		// },
+		// {
+		// 	name:          "Events",
+		// 	category:      path.EventsCategory,
+		// 	metadataFiles: exchange.MetadataFileNames(path.EventsCategory),
+		// },
 	}

 	for _, test := range tests {
 		suite.Run(test.name, func() {
for _, test := range tests { for _, test := range tests {
suite.Run(test.name, func() { suite.Run(test.name, func() {
@@ -101,50 +116,45 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
 			defer flush()

 			var (
-				mb      = evmock.NewBus()
-				sel     = test.selector().Selector
-				opts    = control.Defaults()
-				userID  = suite.its.userID
+				bod     = suite.bi.bod
+				sel     = suite.bi.bod.sel
+				obo     = suite.bi.obo
 				whatSet = deeTD.CategoryFromRepoRef
 			)

-			bo, bod := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
-			defer bod.close(t, ctx)
-
-			sel = bod.sel
-
-			m365, err := bod.acct.M365Config()
-			require.NoError(t, err, clues.ToCore(err))
-
-			// run the tests
-			runAndCheckBackup(t, ctx, &bo, mb, false)
+			fmt.Printf("\n-----\n%s BUPs\n", test.name)
+			ibii := suite.bi
+			for ibii != nil {
+				fmt.Println(ibii.obo.Results.BackupID)
+				ibii = ibii.incremental
+			}
+			fmt.Printf("-----\n")
+
+			userID := sel.ID()
+
 			checkBackupIsInManifests(
 				t,
 				ctx,
-				bod.kw,
-				bod.sw,
-				&bo,
+				bod,
+				obo,
 				sel,
 				userID,
 				test.category)
 			checkMetadataFilesExist(
 				t,
 				ctx,
-				bo.Results.BackupID,
-				bod.kw,
-				bod.kms,
-				m365.AzureTenantID,
+				obo.Results.BackupID,
+				bod,
+				suite.its.acct.ID(),
 				userID,
 				path.ExchangeService,
 				map[path.CategoryType][]string{test.category: test.metadataFiles})
+
 			_, expectDeets := deeTD.GetDeetsInBackup(
 				t,
 				ctx,
-				bo.Results.BackupID,
+				obo.Results.BackupID,
 				bod.acct.ID(),
-				userID,
+				sel,
 				path.ExchangeService,
 				whatSet,
 				bod.kms,
@@ -152,82 +162,82 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
 			deeTD.CheckBackupDetails(
 				t,
 				ctx,
-				bo.Results.BackupID,
+				obo.Results.BackupID,
 				whatSet,
 				bod.kms,
 				bod.sss,
 				expectDeets,
 				false)
-
-			// Basic, happy path incremental test. No changes are dictated or expected.
-			// This only tests that an incremental backup is runnable at all, and that it
-			// produces fewer results than the last backup.
-			var (
-				incMB = evmock.NewBus()
-				incBO = newTestBackupOp(
-					t,
-					ctx,
-					bod,
-					incMB,
-					opts)
-			)
-
-			runAndCheckBackup(t, ctx, &incBO, incMB, true)
-			checkBackupIsInManifests(
-				t,
-				ctx,
-				bod.kw,
-				bod.sw,
-				&incBO,
-				sel,
-				userID,
-				test.category)
-			checkMetadataFilesExist(
-				t,
-				ctx,
-				incBO.Results.BackupID,
-				bod.kw,
-				bod.kms,
-				m365.AzureTenantID,
-				userID,
-				path.ExchangeService,
-				map[path.CategoryType][]string{test.category: test.metadataFiles})
-			deeTD.CheckBackupDetails(
-				t,
-				ctx,
-				incBO.Results.BackupID,
-				whatSet,
-				bod.kms,
-				bod.sss,
-				expectDeets,
-				false)
-
-			// do some additional checks to ensure the incremental dealt with fewer items.
-			assert.Greater(t, bo.Results.ItemsWritten, incBO.Results.ItemsWritten, "incremental items written")
-			assert.Greater(t, bo.Results.ItemsRead, incBO.Results.ItemsRead, "incremental items read")
-			assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read")
-			assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded")
-			assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
-			assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
-			assert.Empty(t, incBO.Errors.Recovered(), "count incremental recoverable/iteration errors")
-			assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
-			assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
-			assert.Equal(t,
-				incMB.CalledWith[events.BackupStart][0][events.BackupID],
-				incBO.Results.BackupID, "incremental backupID pre-declaration")
 		})
 	}
+
+	// // Basic, happy path incremental test. No changes are dictated or expected.
+	// // This only tests that an incremental backup is runnable at all, and that it
+	// // produces fewer results than the last backup.
+	// var (
+	// 	incMB = evmock.NewBus()
+	// 	incBO = newTestBackupOp(
+	// 		t,
+	// 		ctx,
+	// 		bod,
+	// 		incMB,
+	// 		opts)
+	// )
+	//
+	// runAndCheckBackup(t, ctx, &incBO, incMB, true)
+	// checkBackupIsInManifests(
+	// 	t,
+	// 	ctx,
+	// 	bod.kw,
+	// 	bod.sw,
+	// 	&incBO,
+	// 	sel,
+	// 	userID,
+	// 	test.category)
+	// checkMetadataFilesExist(
+	// 	t,
+	// 	ctx,
+	// 	incBO.Results.BackupID,
+	// 	bod.kw,
+	// 	bod.kms,
+	// 	m365.AzureTenantID,
+	// 	userID,
+	// 	path.ExchangeService,
+	// 	map[path.CategoryType][]string{test.category: test.metadataFiles})
+	// deeTD.CheckBackupDetails(
+	// 	t,
+	// 	ctx,
+	// 	incBO.Results.BackupID,
+	// 	whatSet,
+	// 	bod.kms,
+	// 	bod.sss,
+	// 	expectDeets,
+	// 	false)
+	//
+	// // do some additional checks to ensure the incremental dealt with fewer items.
+	// assert.Greater(t, bo.Results.ItemsWritten, incBO.Results.ItemsWritten, "incremental items written")
+	// assert.Greater(t, bo.Results.ItemsRead, incBO.Results.ItemsRead, "incremental items read")
+	// assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read")
+	// assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded")
+	// assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
+	// assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
+	// assert.Empty(t, incBO.Errors.Recovered(), "count incremental recoverable/iteration errors")
+	// assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
+	// assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
+	// assert.Equal(t,
+	// 	incMB.CalledWith[events.BackupStart][0][events.BackupID],
+	// 	incBO.Results.BackupID, "incremental backupID pre-declaration")
 }
-func (suite *ExchangeBackupIntgSuite) TestBackup_Run_incrementalExchange() {
+func (suite *ExchangeIntgSuite) TestBackup_Run_incrementalExchange() {
 	testExchangeContinuousBackups(suite, control.Toggles{})
 }

-func (suite *ExchangeBackupIntgSuite) TestBackup_Run_incrementalNonDeltaExchange() {
+func (suite *ExchangeIntgSuite) TestBackup_Run_incrementalNonDeltaExchange() {
 	testExchangeContinuousBackups(suite, control.Toggles{DisableDelta: true})
 }

-func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles control.Toggles) {
+func testExchangeContinuousBackups(suite *ExchangeIntgSuite, toggles control.Toggles) {
 	t := suite.T()

 	ctx, flush := tester.NewContext(t)
@@ -346,6 +356,9 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 	// populate initial test data
 	for category, gen := range dataset {
 		for destName := range gen.dests {
+			rc := control.DefaultRestoreConfig("")
+			rc.Location = destName
+
 			deets := generateContainerOfItems(
 				t,
 				ctx,
@@ -356,7 +369,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 				creds.AzureTenantID,
 				uidn.ID(),
 				"",
-				destName,
+				rc,
 				2,
 				version.Backup,
 				gen.dbf)
@@ -380,11 +393,10 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 		}
 	}

-	bo, bod := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
-	defer bod.close(t, ctx)
-
-	// run the initial backup
-	runAndCheckBackup(t, ctx, &bo, mb, false)
+	// run the initial incremental backup
+	ibi := suite.bi.runAndCheckIncrementalBackup(t, ctx, mb)
+	obo := ibi.obo
+	bod := ibi.bod

 	rrPfx, err := path.ServicePrefix(acct.ID(), uidn.ID(), service, path.EmailCategory)
 	require.NoError(t, err, clues.ToCore(err))
@@ -394,9 +406,9 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 	bupDeets, _ := deeTD.GetDeetsInBackup(
 		t,
 		ctx,
-		bo.Results.BackupID,
+		obo.Results.BackupID,
 		acct.ID(),
-		uidn.ID(),
+		uidn,
 		service,
 		whatSet,
 		bod.kms,
@@ -467,7 +479,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 	deeTD.CheckBackupDetails(
 		t,
 		ctx,
-		bo.Results.BackupID,
+		obo.Results.BackupID,
 		whatSet,
 		bod.kms,
 		bod.sss,
@@ -553,6 +565,9 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 			name: "add a new folder",
 			updateUserData: func(t *testing.T, ctx context.Context) {
 				for category, gen := range dataset {
+					rc := control.DefaultRestoreConfig("")
+					rc.Location = container3
+
 					deets := generateContainerOfItems(
 						t,
 						ctx,
@@ -560,7 +575,10 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 						service,
 						category,
 						selectors.NewExchangeRestore([]string{uidn.ID()}).Selector,
-						creds.AzureTenantID, suite.its.userID, "", container3,
+						creds.AzureTenantID,
+						suite.its.userID,
+						"",
+						rc,
 						2,
 						version.Backup,
 						gen.dbf)
@@ -763,15 +781,17 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 	for _, test := range table {
 		suite.Run(test.name, func() {
 			var (
 				t     = suite.T()
-				incMB = evmock.NewBus()
+				mb    = evmock.NewBus()
 				atid  = creds.AzureTenantID
+
+				ctx, flush = tester.WithContext(t, ctx)
 			)

-			ctx, flush := tester.WithContext(t, ctx)
 			defer flush()

-			incBO := newTestBackupOp(t, ctx, bod, incMB, opts)
+			ibi = ibi.runAndCheckIncrementalBackup(t, ctx, mb)
+			obo := ibi.obo
+			bod := ibi.bod

 			suite.Run("PreTestSetup", func() {
 				t := suite.T()
@@ -782,17 +802,16 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 				test.updateUserData(t, ctx)
 			})

-			err := incBO.Run(ctx)
-			require.NoError(t, err, clues.ToCore(err))
+			bupID := obo.Results.BackupID

-			bupID := incBO.Results.BackupID
+			err := obo.Run(ctx)
+			require.NoError(t, err, clues.ToCore(err))

 			checkBackupIsInManifests(
 				t,
 				ctx,
-				bod.kw,
-				bod.sw,
-				&incBO,
+				bod,
+				obo,
 				sels,
 				uidn.ID(),
 				maps.Keys(categories)...)
@@ -800,8 +819,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 				t,
 				ctx,
 				bupID,
-				bod.kw,
-				bod.kms,
+				bod,
 				atid,
 				uidn.ID(),
 				service,
@@ -815,30 +833,6 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
 				bod.sss,
 				expectDeets,
 				true)
-
-			// FIXME: commented tests are flaky due to delta calls retaining data that is
-			// out of scope of the test data.
-			// we need to find a better way to make isolated assertions here.
-			// The addition of the deeTD package gives us enough coverage to comment
-			// out the tests for now and look to their improvemeng later.
-			// do some additional checks to ensure the incremental dealt with fewer items.
-			// +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
-			// if !toggles.DisableDelta {
-			// 	assert.Equal(t, test.deltaItemsRead+4, incBO.Results.ItemsRead, "incremental items read")
-			// 	assert.Equal(t, test.deltaItemsWritten+4, incBO.Results.ItemsWritten, "incremental items written")
-			// } else {
-			// 	assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
-			// 	assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
-			// }
-			// assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
-
-			assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
-			assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
-			assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
-			assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
-			assert.Equal(t,
-				incMB.CalledWith[events.BackupStart][0][events.BackupID],
-				bupID, "incremental backupID pre-declaration")
 		})
 	}
 }

View File

@@ -2,7 +2,6 @@ package test_test

 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"
@@ -15,24 +14,16 @@ import (
 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/internal/events"
-	evmock "github.com/alcionai/corso/src/internal/events/mock"
-	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/m365"
 	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/m365/resource"
-	"github.com/alcionai/corso/src/internal/model"
-	"github.com/alcionai/corso/src/internal/operations"
-	"github.com/alcionai/corso/src/internal/streamstore"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
-	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
-	"github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/count"
 	"github.com/alcionai/corso/src/pkg/extensions"
 	"github.com/alcionai/corso/src/pkg/fault"
@@ -40,9 +31,6 @@ import (
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 	"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
-	"github.com/alcionai/corso/src/pkg/storage"
-	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
-	"github.com/alcionai/corso/src/pkg/store"
 )

 // Does not use the tester.DefaultTestRestoreDestination syntax as some of these
@@ -50,308 +38,69 @@ import (
 // they get clearly selected without accidental overlap.
 const incrementalsDestContainerPrefix = "incrementals_ci_"
-type backupOpDependencies struct {
-	acct account.Account
-	ctrl *m365.Controller
-	kms  *kopia.ModelStore
-	kw   *kopia.Wrapper
-	sel  selectors.Selector
-	sss  streamstore.Streamer
-	st   storage.Storage
-	sw   *store.Wrapper
-
-	closer func()
-}
-
-func (bod *backupOpDependencies) close(
-	t *testing.T,
-	ctx context.Context, //revive:disable-line:context-as-argument
-) {
-	bod.closer()
-
-	if bod.kw != nil {
-		err := bod.kw.Close(ctx)
-		assert.NoErrorf(t, err, "kw close: %+v", clues.ToCore(err))
-	}
-
-	if bod.kms != nil {
-		err := bod.kw.Close(ctx)
-		assert.NoErrorf(t, err, "kms close: %+v", clues.ToCore(err))
-	}
-}
-
-// prepNewTestBackupOp generates all clients required to run a backup operation,
-// returning both a backup operation created with those clients, as well as
-// the clients themselves.
-func prepNewTestBackupOp(
-	t *testing.T,
-	ctx context.Context, //revive:disable-line:context-as-argument
-	bus events.Eventer,
-	sel selectors.Selector,
-	opts control.Options,
-	backupVersion int,
-) (
-	operations.BackupOperation,
-	*backupOpDependencies,
-) {
-	bod := &backupOpDependencies{
-		acct: tconfig.NewM365Account(t),
-		st:   storeTD.NewPrefixedS3Storage(t),
-	}
-
-	k := kopia.NewConn(bod.st)
-
-	err := k.Initialize(ctx, repository.Options{})
-	require.NoError(t, err, clues.ToCore(err))
-
-	defer func() {
-		if err != nil {
-			bod.close(t, ctx)
-			t.FailNow()
-		}
-	}()
-
-	// kopiaRef comes with a count of 1 and Wrapper bumps it again
-	// we're so safe to close here.
-	bod.closer = func() {
-		err := k.Close(ctx)
-		assert.NoErrorf(t, err, "k close: %+v", clues.ToCore(err))
-	}
-
-	bod.kw, err = kopia.NewWrapper(k)
-	if !assert.NoError(t, err, clues.ToCore(err)) {
-		return operations.BackupOperation{}, nil
-	}
-
-	bod.kms, err = kopia.NewModelStore(k)
-	if !assert.NoError(t, err, clues.ToCore(err)) {
-		return operations.BackupOperation{}, nil
-	}
-
-	bod.sw = store.NewKopiaStore(bod.kms)
-
-	connectorResource := resource.Users
-	if sel.Service == selectors.ServiceSharePoint {
-		connectorResource = resource.Sites
-	}
-
-	bod.ctrl, bod.sel = ControllerWithSelector(
-		t,
-		ctx,
-		bod.acct,
-		connectorResource,
-		sel,
-		nil,
-		bod.close)
-
-	bo := newTestBackupOp(
-		t,
-		ctx,
-		bod,
-		bus,
-		opts)
-
-	bod.sss = streamstore.NewStreamer(
-		bod.kw,
-		bod.acct.ID(),
-		bod.sel.PathService())
-
-	return bo, bod
-}
-
-// newTestBackupOp accepts the clients required to compose a backup operation, plus
-// any other metadata, and uses them to generate a new backup operation. This
-// allows backup chains to utilize the same temp directory and configuration
-// details.
-func newTestBackupOp(
-	t *testing.T,
-	ctx context.Context, //revive:disable-line:context-as-argument
-	bod *backupOpDependencies,
-	bus events.Eventer,
-	opts control.Options,
-) operations.BackupOperation {
-	bod.ctrl.IDNameLookup = idname.NewCache(map[string]string{bod.sel.ID(): bod.sel.Name()})
-
-	bo, err := operations.NewBackupOperation(
-		ctx,
-		opts,
-		bod.kw,
-		bod.sw,
-		bod.ctrl,
-		bod.acct,
-		bod.sel,
-		bod.sel,
-		bus)
-	if !assert.NoError(t, err, clues.ToCore(err)) {
-		bod.close(t, ctx)
-		t.FailNow()
-	}
-
-	return bo
-}
-
-func runAndCheckBackup(
-	t *testing.T,
-	ctx context.Context, //revive:disable-line:context-as-argument
-	bo *operations.BackupOperation,
-	mb *evmock.Bus,
-	acceptNoData bool,
-) {
-	err := bo.Run(ctx)
-	require.NoError(t, err, clues.ToCore(err))
-	require.NotEmpty(t, bo.Results, "the backup had non-zero results")
-	require.NotEmpty(t, bo.Results.BackupID, "the backup generated an ID")
-
-	expectStatus := []operations.OpStatus{operations.Completed}
-	if acceptNoData {
-		expectStatus = append(expectStatus, operations.NoData)
-	}
-
-	require.Contains(
-		t,
-		expectStatus,
-		bo.Status,
-		"backup doesn't match expectation, wanted any of %v, got %s",
-		expectStatus,
-		bo.Status)
-
-	require.Less(t, 0, bo.Results.ItemsWritten)
-	assert.Less(t, 0, bo.Results.ItemsRead, "count of items read")
-	assert.Less(t, int64(0), bo.Results.BytesRead, "bytes read")
-	assert.Less(t, int64(0), bo.Results.BytesUploaded, "bytes uploaded")
-	assert.Equal(t, 1, bo.Results.ResourceOwners, "count of resource owners")
-	assert.NoError(t, bo.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
-	assert.Empty(t, bo.Errors.Recovered(), "incremental recoverable/iteration errors")
-	assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")
-	assert.Equal(t, 1, mb.TimesCalled[events.BackupEnd], "backup-end events")
-	assert.Equal(t,
-		mb.CalledWith[events.BackupStart][0][events.BackupID],
-		bo.Results.BackupID, "backupID pre-declaration")
-}
-
-func checkBackupIsInManifests(
-	t *testing.T,
-	ctx context.Context, //revive:disable-line:context-as-argument
-	kw *kopia.Wrapper,
-	sw *store.Wrapper,
-	bo *operations.BackupOperation,
-	sel selectors.Selector,
-	resourceOwner string,
-	categories ...path.CategoryType,
-) {
-	for _, category := range categories {
-		t.Run(category.String(), func(t *testing.T) {
-			var (
-				reasons = []kopia.Reason{
-					{
-						ResourceOwner: resourceOwner,
-						Service:       sel.PathService(),
-						Category:      category,
-					},
-				}
-				tags  = map[string]string{kopia.TagBackupCategory: ""}
-				found bool
-			)
-
-			bf, err := kw.NewBaseFinder(sw)
-			require.NoError(t, err, clues.ToCore(err))
-
-			mans := bf.FindBases(ctx, reasons, tags)
-			for _, man := range mans.MergeBases() {
-				bID, ok := man.GetTag(kopia.TagBackupID)
-				if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) {
-					continue
-				}
-
-				if bID == string(bo.Results.BackupID) {
-					found = true
-					break
-				}
-			}
-
-			assert.True(t, found, "backup retrieved by previous snapshot manifest")
-		})
-	}
-}
-
-func checkMetadataFilesExist(
-	t *testing.T,
-	ctx context.Context, //revive:disable-line:context-as-argument
-	backupID model.StableID,
-	kw *kopia.Wrapper,
-	ms *kopia.ModelStore,
-	tenant, resourceOwner string,
-	service path.ServiceType,
-	filesByCat map[path.CategoryType][]string,
-) {
-	for category, files := range filesByCat {
-		t.Run(category.String(), func(t *testing.T) {
-			bup := &backup.Backup{}
-
-			err := ms.Get(ctx, model.BackupSchema, backupID, bup)
-			if !assert.NoError(t, err, clues.ToCore(err)) {
-				return
-			}
-
-			paths := []path.RestorePaths{}
-			pathsByRef := map[string][]string{}
-
-			for _, fName := range files {
-				p, err := path.Builder{}.
-					Append(fName).
-					ToServiceCategoryMetadataPath(tenant, resourceOwner, service, category, true)
-				if !assert.NoError(t, err, "bad metadata path", clues.ToCore(err)) {
-					continue
-				}
-
-				dir, err := p.Dir()
-				if !assert.NoError(t, err, "parent path", clues.ToCore(err)) {
-					continue
-				}
-
-				paths = append(
-					paths,
-					path.RestorePaths{StoragePath: p, RestorePath: dir})
-				pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName)
-			}
-
-			cols, err := kw.ProduceRestoreCollections(
-				ctx,
-				bup.SnapshotID,
-				paths,
-				nil,
-				fault.New(true))
-			assert.NoError(t, err, clues.ToCore(err))
-
-			for _, col := range cols {
-				itemNames := []string{}
-
-				for item := range col.Items(ctx, fault.New(true)) {
-					assert.Implements(t, (*data.StreamSize)(nil), item)
-
-					s := item.(data.StreamSize)
-					assert.Greaterf(
-						t,
-						s.Size(),
-						int64(0),
-						"empty metadata file: %s/%s",
-						col.FullPath(),
-						item.UUID(),
-					)
-
-					itemNames = append(itemNames, item.UUID())
-				}
-
-				assert.ElementsMatchf(
-					t,
-					pathsByRef[col.FullPath().ShortRef()],
-					itemNames,
-					"collection %s missing expected files",
-					col.FullPath(),
-				)
-			}
-		})
-	}
-}
+// ---------------------------------------------------------------------------
+// Suite Setup
+// ---------------------------------------------------------------------------
+
+type intgTesterSetup struct {
+	ac                    api.Client
+	gockAC                api.Client
+	acct                  account.Account
+	userID                string
+	userDriveID           string
+	userDriveRootFolderID string
+	siteID                string
+	siteDriveID           string
+	siteDriveRootFolderID string
+}
+
+func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
+	its := intgTesterSetup{}
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	graph.InitializeConcurrencyLimiter(ctx, true, 4)
+
+	its.acct = tconfig.NewM365Account(t)
+	creds, err := its.acct.M365Config()
+	require.NoError(t, err, clues.ToCore(err))
+
+	its.ac, err = api.NewClient(creds)
+	require.NoError(t, err, clues.ToCore(err))
+
+	its.gockAC, err = mock.NewClient(creds)
+	require.NoError(t, err, clues.ToCore(err))
+
+	// user drive
+
+	its.userID = tconfig.M365UserID(t)
+
+	userDrive, err := its.ac.Users().GetDefaultDrive(ctx, its.userID)
+	require.NoError(t, err, clues.ToCore(err))
+
+	its.userDriveID = ptr.Val(userDrive.GetId())
+
+	userDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.userDriveID)
+	require.NoError(t, err, clues.ToCore(err))
+
+	its.userDriveRootFolderID = ptr.Val(userDriveRootFolder.GetId())
+
+	its.siteID = tconfig.M365SiteID(t)
+
+	// site
+
+	siteDrive, err := its.ac.Sites().GetDefaultDrive(ctx, its.siteID)
+	require.NoError(t, err, clues.ToCore(err))
+
+	its.siteDriveID = ptr.Val(siteDrive.GetId())
+
+	siteDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.siteDriveID)
+	require.NoError(t, err, clues.ToCore(err))
+
+	its.siteDriveRootFolderID = ptr.Val(siteDriveRootFolder.GetId())
+
+	return its
+}

 // ---------------------------------------------------------------------------
@@ -373,7 +122,8 @@ func generateContainerOfItems(
 	service path.ServiceType,
 	cat path.CategoryType,
 	sel selectors.Selector,
-	tenantID, resourceOwner, driveID, destFldr string,
+	tenantID, resourceOwner, driveID string,
+	rc control.RestoreConfig,
 	howManyItems int,
 	backupVersion int,
 	dbf dataBuilderFunc,
@@ -391,11 +141,11 @@ func generateContainerOfItems(
 		})
 	}

-	pathFolders := []string{destFldr}
+	pathFolders := []string{}

 	switch service {
 	case path.OneDriveService, path.SharePointService:
-		pathFolders = []string{odConsts.DrivesPathDir, driveID, odConsts.RootPathDir, destFldr}
+		pathFolders = []string{odConsts.DrivesPathDir, driveID, odConsts.RootPathDir}
 	}

 	collections := []incrementalCollection{{
@@ -404,14 +154,10 @@ func generateContainerOfItems(
 		items:    items,
 	}}

-	restoreCfg := control.DefaultRestoreConfig(dttm.SafeForTesting)
-	restoreCfg.Location = destFldr
-
 	dataColls := buildCollections(
 		t,
 		service,
 		tenantID, resourceOwner,
-		restoreCfg,
 		collections)

 	opts := control.Defaults()
@@ -421,7 +167,7 @@ func generateContainerOfItems(
 		ctx,
 		backupVersion,
 		sel,
-		restoreCfg,
+		rc,
 		opts,
 		dataColls,
 		fault.New(true),
@@ -467,7 +213,6 @@ func buildCollections(
 	t *testing.T,
 	service path.ServiceType,
 	tenant, user string,
-	restoreCfg control.RestoreConfig,
 	colls []incrementalCollection,
 ) []data.RestoreCollection {
 	t.Helper()
@@ -475,14 +220,8 @@ func buildCollections(
 	collections := make([]data.RestoreCollection, 0, len(colls))

 	for _, c := range colls {
-		pth := toDataLayerPath(
-			t,
-			service,
-			tenant,
-			user,
-			c.category,
-			c.pathFolders,
-			false)
+		pth, err := path.Build(tenant, user, service, c.category, false, c.pathFolders...)
+		require.NoError(t, err, clues.ToCore(err))

 		mc := exchMock.NewCollection(pth, pth, len(c.items))
@@ -497,38 +236,6 @@ func buildCollections(
 	return collections
 }
-func toDataLayerPath(
-	t *testing.T,
-	service path.ServiceType,
-	tenant, resourceOwner string,
-	category path.CategoryType,
-	elements []string,
-	isItem bool,
-) path.Path {
-	t.Helper()
-
-	var (
-		pb  = path.Builder{}.Append(elements...)
-		p   path.Path
-		err error
-	)
-
-	switch service {
-	case path.ExchangeService:
-		p, err = pb.ToDataLayerExchangePathForCategory(tenant, resourceOwner, category, isItem)
-	case path.OneDriveService:
-		p, err = pb.ToDataLayerOneDrivePath(tenant, resourceOwner, isItem)
-	case path.SharePointService:
-		p, err = pb.ToDataLayerSharePointPath(tenant, resourceOwner, category, isItem)
-	default:
-		err = clues.New(fmt.Sprintf("unknown service: %s", service))
-	}
-
-	require.NoError(t, err, clues.ToCore(err))
-
-	return p
-}
-
 // A QoL builder for live instances that updates
 // the selector's owner id and name in the process
 // to help avoid gotchas.
@@ -564,70 +271,6 @@ func ControllerWithSelector(
 	return ctrl, sel
 }

-// ---------------------------------------------------------------------------
-// Suite Setup
-// ---------------------------------------------------------------------------
-
-type intgTesterSetup struct {
-	ac                    api.Client
-	gockAC                api.Client
-	userID                string
-	userDriveID           string
-	userDriveRootFolderID string
-	siteID                string
-	siteDriveID           string
-	siteDriveRootFolderID string
-}
-
-func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
-	its := intgTesterSetup{}
-
-	ctx, flush := tester.NewContext(t)
-	defer flush()
-
-	graph.InitializeConcurrencyLimiter(ctx, true, 4)
-
-	a := tconfig.NewM365Account(t)
-	creds, err := a.M365Config()
-	require.NoError(t, err, clues.ToCore(err))
-
-	its.ac, err = api.NewClient(creds)
-	require.NoError(t, err, clues.ToCore(err))
-
-	its.gockAC, err = mock.NewClient(creds)
-	require.NoError(t, err, clues.ToCore(err))
-
-	// user drive
-
-	its.userID = tconfig.M365UserID(t)
-
-	userDrive, err := its.ac.Users().GetDefaultDrive(ctx, its.userID)
-	require.NoError(t, err, clues.ToCore(err))
-
-	its.userDriveID = ptr.Val(userDrive.GetId())
-
-	userDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.userDriveID)
-	require.NoError(t, err, clues.ToCore(err))
-
-	its.userDriveRootFolderID = ptr.Val(userDriveRootFolder.GetId())
-
-	its.siteID = tconfig.M365SiteID(t)
-
-	// site
-
-	siteDrive, err := its.ac.Sites().GetDefaultDrive(ctx, its.siteID)
-	require.NoError(t, err, clues.ToCore(err))
-
-	its.siteDriveID = ptr.Val(siteDrive.GetId())
-
-	siteDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.siteDriveID)
-	require.NoError(t, err, clues.ToCore(err))
-
-	its.siteDriveRootFolderID = ptr.Val(siteDriveRootFolder.GetId())
-
-	return its
-}
-
 func getTestExtensionFactories() []extensions.CreateItemExtensioner {
 	return []extensions.CreateItemExtensioner{
 		&extensions.MockItemExtensionFactory{},

View File

@@ -43,6 +43,9 @@ import (
 type OneDriveBackupIntgSuite struct {
 	tester.Suite
 	its intgTesterSetup
+	// the goal of backupInstances is to run a single backup at the start of
+	// the suite, and re-use that backup throughout the rest of the suite.
+	bi *backupInstance
 }

 func TestOneDriveBackupIntgSuite(t *testing.T) {
@@ -54,7 +57,24 @@ func TestOneDriveBackupIntgSuite(t *testing.T) {
 }

 func (suite *OneDriveBackupIntgSuite) SetupSuite() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
 	suite.its = newIntegrationTesterSetup(suite.T())
+
+	sel := selectors.NewOneDriveBackup([]string{suite.its.siteID})
+	sel.Include(selTD.OneDriveBackupFolderScope(sel))
+	sel.DiscreteOwner = suite.its.userID
+
+	var (
+		mb   = evmock.NewBus()
+		opts = control.Defaults()
+	)
+
+	suite.bi = prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
+	suite.bi.runAndCheckBackup(t, ctx, mb, false)
 }

 func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
@@ -64,39 +84,37 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
 	defer flush()

 	var (
-		tenID  = tconfig.M365TenantID(t)
-		mb     = evmock.NewBus()
-		userID = tconfig.SecondaryM365UserID(t)
-		osel   = selectors.NewOneDriveBackup([]string{userID})
-		ws     = deeTD.DriveIDFromRepoRef
-		svc    = path.OneDriveService
-		opts   = control.Defaults()
+		bod     = suite.bi.bod
+		sel     = suite.bi.bod.sel
+		obo     = suite.bi.obo
+		siteID  = suite.its.siteID
+		whatSet = deeTD.DriveIDFromRepoRef
 	)

-	osel.Include(selTD.OneDriveBackupFolderScope(osel))
-
-	bo, bod := prepNewTestBackupOp(t, ctx, mb, osel.Selector, opts, version.Backup)
-	defer bod.close(t, ctx)
-
-	runAndCheckBackup(t, ctx, &bo, mb, false)
-
-	bID := bo.Results.BackupID
+	checkBackupIsInManifests(
+		t,
+		ctx,
+		bod,
+		obo,
+		sel,
+		siteID,
+		path.LibrariesCategory)

 	_, expectDeets := deeTD.GetDeetsInBackup(
 		t,
 		ctx,
-		bID,
-		tenID,
-		bod.sel.ID(),
-		svc,
-		ws,
+		obo.Results.BackupID,
+		bod.acct.ID(),
+		sel,
+		path.OneDriveService,
+		whatSet,
 		bod.kms,
 		bod.sss)
 	deeTD.CheckBackupDetails(
 		t,
 		ctx,
-		bID,
-		ws,
+		obo.Results.BackupID,
+		whatSet,
 		bod.kms,
 		bod.sss,
 		expectDeets,
@@ -135,6 +153,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
 	runDriveIncrementalTest(
 		suite,
+		suite.bi,
 		suite.its.userID,
 		suite.its.userID,
 		resource.Users,
@@ -148,6 +167,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
 func runDriveIncrementalTest(
 	suite tester.Suite,
+	bi *backupInstance,
 	owner, permissionsUser string,
 	rc resource.Category,
 	service path.ServiceType,
@@ -164,7 +184,6 @@ func runDriveIncrementalTest(
 	var (
 		acct = tconfig.NewM365Account(t)
-		opts = control.Defaults()
 		mb   = evmock.NewBus()
 		ws   = deeTD.DriveIDFromRepoRef
@@ -223,6 +242,9 @@ func runDriveIncrementalTest(
 	// through the changes. This should be enough to cover most delta
 	// actions.
 	for _, destName := range genDests {
+		rc := control.DefaultRestoreConfig("")
+		rc.Location = destName
+
 		deets := generateContainerOfItems(
 			t,
 			ctx,
@@ -230,7 +252,10 @@ func runDriveIncrementalTest(
 			service,
 			category,
 			sel,
-			atid, roidn.ID(), driveID, destName,
+			atid,
+			roidn.ID(),
+			driveID,
+			rc,
 			2,
 			// Use an old backup version so we don't need metadata files.
 			0,
@@ -260,20 +285,19 @@ func runDriveIncrementalTest(
 		containerIDs[destName] = ptr.Val(resp.GetId())
 	}

-	bo, bod := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
-	defer bod.close(t, ctx)
+	// run the initial incremental backup
+	ibi := bi.runAndCheckIncrementalBackup(t, ctx, mb)
+	obo := ibi.obo
+	bod := ibi.bod

 	sel = bod.sel

-	// run the initial backup
-	runAndCheckBackup(t, ctx, &bo, mb, false)
-
 	// precheck to ensure the expectedDeets are correct.
 	// if we fail here, the expectedDeets were populated incorrectly.
 	deeTD.CheckBackupDetails(
 		t,
 		ctx,
-		bo.Results.BackupID,
+		obo.Results.BackupID,
 		ws,
 		bod.kms,
 		bod.sss,
@@ -568,6 +592,9 @@ func runDriveIncrementalTest(
 		{
 			name: "add a new folder",
 			updateFiles: func(t *testing.T, ctx context.Context) {
+				rc := control.DefaultRestoreConfig("")
+				rc.Location = container3
+
 				generateContainerOfItems(
 					t,
 					ctx,
@@ -575,7 +602,10 @@ func runDriveIncrementalTest(
 					service,
 					category,
 					sel,
-					atid, roidn.ID(), driveID, container3,
+					atid,
+					roidn.ID(),
+					driveID,
+					rc,
 					2,
 					0,
 					fileDBF)
@@ -600,25 +630,23 @@ func runDriveIncrementalTest(
 	}

 	for _, test := range table {
 		suite.Run(test.name, func() {
-			cleanCtrl, err := m365.NewController(ctx, acct, rc, sel.PathService(), control.Defaults())
-			require.NoError(t, err, clues.ToCore(err))
-
-			bod.ctrl = cleanCtrl
+			// cleanCtrl, err := m365.NewController(ctx, acct, rc, sel.PathService(), control.Defaults())
+			// require.NoError(t, err, clues.ToCore(err))
+			// bod.ctrl = cleanCtrl

 			var (
 				t     = suite.T()
-				incMB = evmock.NewBus()
-				incBO = newTestBackupOp(
-					t,
-					ctx,
-					bod,
-					incMB,
-					opts)
+				mb    = evmock.NewBus()
 			)

 			ctx, flush := tester.WithContext(t, ctx)
 			defer flush()

+			ibi = ibi.runAndCheckIncrementalBackup(t, ctx, mb)
+			obo := ibi.obo
+			bod := ibi.bod
+
 			suite.Run("PreTestSetup", func() {
 				t := suite.T()
@@ -628,17 +656,16 @@ func runDriveIncrementalTest(
 				test.updateFiles(t, ctx)
 			})

-			err = incBO.Run(ctx)
+			err = obo.Run(ctx)
 			require.NoError(t, err, clues.ToCore(err))

-			bupID := incBO.Results.BackupID
+			bupID := obo.Results.BackupID

 			checkBackupIsInManifests(
 				t,
 				ctx,
-				bod.kw,
-				bod.sw,
-				&incBO,
+				bod,
+				obo,
 				sel,
 				roidn.ID(),
 				maps.Keys(categories)...)
@@ -646,8 +673,7 @@ func runDriveIncrementalTest(
 				t,
 				ctx,
 				bupID,
-				bod.kw,
-				bod.kms,
+				bod,
 				atid,
 				roidn.ID(),
 				service,
@ -679,17 +705,9 @@ func runDriveIncrementalTest(
assertReadWrite = assert.LessOrEqual assertReadWrite = assert.LessOrEqual
} }
assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written") assertReadWrite(t, expectWrites, obo.Results.ItemsWritten, "incremental items written")
assertReadWrite(t, expectNonMetaWrites, incBO.Results.NonMetaItemsWritten, "incremental non-meta items written") assertReadWrite(t, expectNonMetaWrites, obo.Results.NonMetaItemsWritten, "incremental non-meta items written")
assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read") assertReadWrite(t, expectReads, obo.Results.ItemsRead, "incremental items read")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
bupID, "incremental backupID pre-declaration")
}) })
} }
} }
@@ -730,26 +748,30 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
oldsel := selectors.NewOneDriveBackup([]string{uname})
oldsel.Include(selTD.OneDriveBackupFolderScope(oldsel))
-bo, bod := prepNewTestBackupOp(t, ctx, mb, oldsel.Selector, opts, 0)
-defer bod.close(t, ctx)
+// don't re-use the suite.bi for this case because we need
+// to control for the backup version.
+bi := prepNewTestBackupOp(t, ctx, mb, oldsel.Selector, opts, 0)
+defer bi.close(t, ctx)
+obo := bi.obo
+bod := bi.bod
sel := bod.sel
// ensure the initial owner uses name in both cases
-bo.ResourceOwner = sel.SetDiscreteOwnerIDName(uname, uname)
+obo.ResourceOwner = sel.SetDiscreteOwnerIDName(uname, uname)
// required, otherwise we don't run the migration
-bo.BackupVersion = version.All8MigrateUserPNToID - 1
+obo.BackupVersion = version.All8MigrateUserPNToID - 1
require.Equalf(
t,
-bo.ResourceOwner.Name(),
-bo.ResourceOwner.ID(),
+obo.ResourceOwner.Name(),
+obo.ResourceOwner.ID(),
"historical representation of user id [%s] should match pn [%s]",
-bo.ResourceOwner.ID(),
-bo.ResourceOwner.Name())
+obo.ResourceOwner.ID(),
+obo.ResourceOwner.Name())
// run the initial backup
-runAndCheckBackup(t, ctx, &bo, mb, false)
+bi.runAndCheckBackup(t, ctx, mb, false)
newsel := selectors.NewOneDriveBackup([]string{uid})
newsel.Include(selTD.OneDriveBackupFolderScope(newsel))
@@ -758,7 +780,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
var (
incMB = evmock.NewBus()
// the incremental backup op should have a proper user ID for the id.
-incBO = newTestBackupOp(t, ctx, bod, incMB, opts)
+incBO = newTestBackupOp(t, ctx, bi.bod, incMB, opts)
)
require.NotEqualf(
@@ -774,9 +796,8 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
checkBackupIsInManifests(
t,
ctx,
-bod.kw,
-bod.sw,
-&incBO,
+bod,
+obo,
sel,
uid,
maps.Keys(categories)...)
@@ -784,8 +805,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
t,
ctx,
incBO.Results.BackupID,
-bod.kw,
-bod.kms,
+bod,
creds.AzureTenantID,
uid,
path.OneDriveService,
@@ -806,13 +826,13 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
bid := incBO.Results.BackupID
bup := &backup.Backup{}
-err = bod.kms.Get(ctx, model.BackupSchema, bid, bup)
+err = bi.bod.kms.Get(ctx, model.BackupSchema, bid, bup)
require.NoError(t, err, clues.ToCore(err))
var (
ssid = bup.StreamStoreID
deets details.Details
-ss = streamstore.NewStreamer(bod.kw, creds.AzureTenantID, path.OneDriveService)
+ss = streamstore.NewStreamer(bi.bod.kw, creds.AzureTenantID, path.OneDriveService)
)
err = ss.Read(ctx, ssid, streamstore.DetailsReader(details.UnmarshalTo(&deets)), fault.New(true))
@@ -836,7 +856,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveExtensions() {
tenID = tconfig.M365TenantID(t)
mb = evmock.NewBus()
userID = tconfig.SecondaryM365UserID(t)
-osel = selectors.NewOneDriveBackup([]string{userID})
+sel = selectors.NewOneDriveBackup([]string{userID})
ws = deeTD.DriveIDFromRepoRef
svc = path.OneDriveService
opts = control.Defaults()
@@ -844,32 +864,44 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveExtensions() {
opts.ItemExtensionFactory = getTestExtensionFactories()
-osel.Include(selTD.OneDriveBackupFolderScope(osel))
+sel.Include(selTD.OneDriveBackupFolderScope(sel))
-bo, bod := prepNewTestBackupOp(t, ctx, mb, osel.Selector, opts, version.Backup)
-defer bod.close(t, ctx)
-runAndCheckBackup(t, ctx, &bo, mb, false)
-bID := bo.Results.BackupID
+// TODO: use the existing backupInstance for this test
+bi := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
+defer bi.bod.close(t, ctx)
+bi.runAndCheckBackup(t, ctx, mb, false)
+bod := bi.bod
+obo := bi.obo
+bID := obo.Results.BackupID
+checkBackupIsInManifests(
+t,
+ctx,
+bod,
+obo,
+bod.sel,
+suite.its.siteID,
+path.LibrariesCategory)
deets, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bID,
tenID,
-bod.sel.ID(),
+bod.sel,
svc,
ws,
-bod.kms,
-bod.sss)
+bi.bod.kms,
+bi.bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bID,
ws,
-bod.kms,
-bod.sss,
+bi.bod.kms,
+bi.bod.sss,
expectDeets,
false)


@@ -26,24 +26,99 @@ import (
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
)
-type SharePointBackupIntgSuite struct {
+type SharePointIntgSuite struct {
tester.Suite
its intgTesterSetup
+// the goal of backupInstances is to run a single backup at the start of
+// the suite, and re-use that backup throughout the rest of the suite.
+bi *backupInstance
}
-func TestSharePointBackupIntgSuite(t *testing.T) {
-suite.Run(t, &SharePointBackupIntgSuite{
+func TestSharePointIntgSuite(t *testing.T) {
+suite.Run(t, &SharePointIntgSuite{
Suite: tester.NewIntegrationSuite(
t,
[][]string{tconfig.M365AcctCredEnvs, storeTD.AWSStorageCredEnvs}),
})
}
-func (suite *SharePointBackupIntgSuite) SetupSuite() {
+func (suite *SharePointIntgSuite) SetupSuite() {
+t := suite.T()
+ctx, flush := tester.NewContext(t)
+defer flush()
suite.its = newIntegrationTesterSetup(suite.T())
+sel := selectors.NewSharePointBackup([]string{suite.its.siteID})
+sel.Include(selTD.SharePointBackupFolderScope(sel))
+sel.DiscreteOwner = suite.its.siteID
+var (
+mb = evmock.NewBus()
+opts = control.Defaults()
+)
+suite.bi = prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
+suite.bi.runAndCheckBackup(t, ctx, mb, false)
}
-func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
+func (suite *SharePointIntgSuite) TeardownSuite() {
+t := suite.T()
+ctx, flush := tester.NewContext(t)
+defer flush()
+if suite.bi != nil {
+suite.bi.close(t, ctx)
+}
+}
+func (suite *SharePointIntgSuite) TestBackup_Run_sharePoint() {
+t := suite.T()
+ctx, flush := tester.NewContext(t)
+defer flush()
+var (
+bod = suite.bi.bod
+sel = suite.bi.bod.sel
+obo = suite.bi.obo
+siteID = suite.its.siteID
+whatSet = deeTD.DriveIDFromRepoRef
+)
+checkBackupIsInManifests(
+t,
+ctx,
+bod,
+obo,
+sel,
+siteID,
+path.LibrariesCategory)
+_, expectDeets := deeTD.GetDeetsInBackup(
+t,
+ctx,
+obo.Results.BackupID,
+bod.acct.ID(),
+sel,
+path.SharePointService,
+whatSet,
+bod.kms,
+bod.sss)
+deeTD.CheckBackupDetails(
+t,
+ctx,
+obo.Results.BackupID,
+whatSet,
+bod.kms,
+bod.sss,
+expectDeets,
+false)
+}
+func (suite *SharePointIntgSuite) TestBackup_Run_incrementalSharePoint() {
sel := selectors.NewSharePointRestore([]string{suite.its.siteID})
ic := func(cs []string) selectors.Selector {
@@ -75,6 +150,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
runDriveIncrementalTest(
suite,
+suite.bi,
suite.its.siteID,
suite.its.userID,
resource.Sites,
@@ -86,36 +162,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
true)
}
-func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePoint() {
-t := suite.T()
-ctx, flush := tester.NewContext(t)
-defer flush()
-var (
-mb = evmock.NewBus()
-sel = selectors.NewSharePointBackup([]string{suite.its.siteID})
-opts = control.Defaults()
-)
-sel.Include(selTD.SharePointBackupFolderScope(sel))
-bo, bod := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
-defer bod.close(t, ctx)
-runAndCheckBackup(t, ctx, &bo, mb, false)
-checkBackupIsInManifests(
-t,
-ctx,
-bod.kw,
-bod.sw,
-&bo,
-bod.sel,
-suite.its.siteID,
-path.LibrariesCategory)
-}
-func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointExtensions() {
+func (suite *SharePointIntgSuite) TestBackup_Run_sharePointExtensions() {
t := suite.T()
ctx, flush := tester.NewContext(t)
@@ -134,39 +181,42 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointExtensions() {
sel.Include(selTD.SharePointBackupFolderScope(sel))
-bo, bod := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
-defer bod.close(t, ctx)
-runAndCheckBackup(t, ctx, &bo, mb, false)
+// TODO: use the existing backupInstance for this test
+bi := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
+defer bi.bod.close(t, ctx)
+bi.runAndCheckBackup(t, ctx, mb, false)
+bod := bi.bod
+obo := bi.obo
+bID := obo.Results.BackupID
checkBackupIsInManifests(
t,
ctx,
-bod.kw,
-bod.sw,
-&bo,
+bod,
+obo,
bod.sel,
suite.its.siteID,
path.LibrariesCategory)
-bID := bo.Results.BackupID
deets, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bID,
tenID,
-bod.sel.ID(),
+bod.sel,
svc,
ws,
-bod.kms,
-bod.sss)
+bi.bod.kms,
+bi.bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bID,
ws,
-bod.kms,
-bod.sss,
+bi.bod.kms,
+bi.bod.sss,
expectDeets,
false)


@@ -10,6 +10,8 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
+"github.com/alcionai/corso/src/internal/common/idname"
+idnMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/internal/streamstore"
@@ -293,7 +295,16 @@ func CheckBackupDetails(
// of data.
mustEqualFolders bool,
) {
-deets, result := GetDeetsInBackup(t, ctx, backupID, "", "", path.UnknownService, ws, ms, ssr)
+deets, result := GetDeetsInBackup(
+t,
+ctx,
+backupID,
+"",
+idnMock.NewProvider("", ""),
+path.UnknownService,
+ws,
+ms,
+ssr)
t.Log("details entries in result")
@@ -339,7 +350,8 @@ func GetDeetsInBackup(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
backupID model.StableID,
-tid, resourceOwner string,
+tid string,
+protectedResource idname.Provider,
service path.ServiceType,
ws whatSet,
ms *kopia.ModelStore,
@@ -361,7 +373,9 @@ func GetDeetsInBackup(
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
-id := NewInDeets(path.Builder{}.Append(tid, service.String(), resourceOwner).String())
+pb := path.Builder{}.Append(tid, service.String(), protectedResource.ID())
+id := NewInDeets(pb.String())
id.AddAll(deets, ws)
return deets, id

src/pkg/selectors/testdata/selectors.go (new file)
@@ -0,0 +1,44 @@
package testdata

import (
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/selectors"
)

func MakeSelector(
    t *testing.T,
    service path.ServiceType,
    resourceOwners []string,
    forRestore bool,
) selectors.Selector {
    switch service {
    case path.ExchangeService:
        if forRestore {
            return selectors.NewExchangeRestore(resourceOwners).Selector
        }

        return selectors.NewExchangeBackup(resourceOwners).Selector

    case path.OneDriveService:
        if forRestore {
            return selectors.NewOneDriveRestore(resourceOwners).Selector
        }

        return selectors.NewOneDriveBackup(resourceOwners).Selector

    case path.SharePointService:
        if forRestore {
            return selectors.NewSharePointRestore(resourceOwners).Selector
        }

        return selectors.NewSharePointBackup(resourceOwners).Selector

    default:
        require.FailNow(t, "unknown path service")
        return selectors.Selector{}
    }
}
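
For orientation, a minimal usage sketch of the new MakeSelector helper. This sketch is not part of the diff: the import path is inferred from the file location above, and the selTD alias, test name, and owner address are illustrative assumptions.

package testdata_test

import (
    "testing"

    "github.com/alcionai/corso/src/pkg/path"
    selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
)

func TestMakeSelector_Sketch(t *testing.T) {
    // forRestore=true returns the service's Restore selector variant.
    restoreSel := selTD.MakeSelector(t, path.OneDriveService, []string{"user@example.com"}, true)

    // forRestore=false returns the Backup variant for the same owners.
    backupSel := selTD.MakeSelector(t, path.OneDriveService, []string{"user@example.com"}, false)

    _ = restoreSel
    _ = backupSel
}

Centralizing this switch lets the integration suites build a selector for any service from one call site instead of duplicating per-service constructors in each test file.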