setting aside temporarily

This commit is contained in:
ryanfkeepers 2023-07-10 11:07:39 -06:00
parent 9f334f7d30
commit ab0028bb0f
10 changed files with 958 additions and 747 deletions

View File

@ -2,6 +2,7 @@ package kopia
import (
"context"
"fmt"
"sort"
"github.com/alcionai/clues"
@ -287,6 +288,11 @@ func (b *baseFinder) getBase(
return nil, nil, nil, clues.Wrap(err, "getting snapshots")
}
fmt.Printf("\n-----\nmetas %+v\n-----\n", len(metas))
for _, m := range metas {
fmt.Println(m.ID, m.Labels)
}
// No snapshots means no backups so we can just exit here.
if len(metas) == 0 {
return nil, nil, nil, nil

View File

@ -428,7 +428,7 @@ func runRestore(
start := time.Now()
restoreCtrl := newController(ctx, t, sci.Resource, path.ExchangeService)
restoreSel := getSelectorWith(t, sci.Service, sci.ResourceOwners, true)
restoreSel := selTD.MakeSelector(t, sci.Service, sci.ResourceOwners, true)
deets, err := restoreCtrl.ConsumeRestoreCollections(
ctx,
backupVersion,
@ -997,7 +997,7 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
ctx, flush := tester.NewContext(t)
defer flush()
restoreSel := getSelectorWith(t, test.service, []string{suite.user}, true)
restoreSel := selTD.MakeSelector(t, test.service, []string{suite.user}, true)
expectedDests := make([]destAndCats, 0, len(test.collections))
allItems := 0
allExpectedData := map[string]map[string][]byte{}

View File

@ -1157,40 +1157,6 @@ func backupSelectorForExpected(
return selectors.Selector{}
}
func getSelectorWith(
t *testing.T,
service path.ServiceType,
resourceOwners []string,
forRestore bool,
) selectors.Selector {
switch service {
case path.ExchangeService:
if forRestore {
return selectors.NewExchangeRestore(resourceOwners).Selector
}
return selectors.NewExchangeBackup(resourceOwners).Selector
case path.OneDriveService:
if forRestore {
return selectors.NewOneDriveRestore(resourceOwners).Selector
}
return selectors.NewOneDriveBackup(resourceOwners).Selector
case path.SharePointService:
if forRestore {
return selectors.NewSharePointRestore(resourceOwners).Selector
}
return selectors.NewSharePointBackup(resourceOwners).Selector
default:
require.FailNow(t, "unknown path service")
return selectors.Selector{}
}
}
func newController(
ctx context.Context,
t *testing.T,

View File

@ -0,0 +1,462 @@
package test_test
import (
"context"
"fmt"
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/events"
evmock "github.com/alcionai/corso/src/internal/events/mock"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/m365"
"github.com/alcionai/corso/src/internal/m365/resource"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/internal/operations"
"github.com/alcionai/corso/src/internal/streamstore"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/storage"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
"github.com/alcionai/corso/src/pkg/store"
)
// ---------------------------------------------------------------------------
// singleton
// ---------------------------------------------------------------------------
// backupInstance bundles a backup operation with the dependency set it
// runs against. Incremental backups chain off an initial instance,
// forming a linked list that close() walks to tear everything down.
type backupInstance struct {
	obo *operations.BackupOperation
	bod *backupOpDependencies

	// forms a linked list of incremental backups
	incremental *backupInstance
}
// close tears down this backup instance and every incremental backup
// chained after it. Instances are released deepest-first, matching the
// order in which their dependencies were created.
func (bi *backupInstance) close(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
) {
	// collect the chain, then unwind it from the tail back to this node.
	var chain []*backupInstance
	for cur := bi; cur != nil; cur = cur.incremental {
		chain = append(chain, cur)
	}

	for i := len(chain) - 1; i >= 0; i-- {
		chain[i].bod.close(t, ctx)
	}
}
// runAndCheckBackup executes the instance's backup operation and requires
// that it succeeds, produced results, and generated a backup ID, then
// delegates the remaining invariant checks to checkBackup.
func (bi *backupInstance) runAndCheckBackup(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	mb *evmock.Bus,
	acceptNoData bool,
) {
	obo := bi.obo

	err := obo.Run(ctx)
	require.NoError(t, err, clues.ToCore(err))
	require.NotEmpty(t, obo.Results, "the backup had non-zero results")
	require.NotEmpty(t, obo.Results.BackupID, "the backup generated an ID")

	checkBackup(t, *obo, mb, acceptNoData)
}
// runAndCheckIncrementalBackup runs a new incremental backup chained onto
// bi. It reuses bi's dependency connections but renews the controller to
// avoid statefulness between runs. The new instance is linked into the
// chain and returned.
//
// Bugfix: the original built a local `incremental` instance but then ran
// and asserted against `bi.incremental`, which was never assigned (the
// assignment was commented out), causing a nil-pointer panic.
func (bi *backupInstance) runAndCheckIncrementalBackup(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	mb *evmock.Bus,
) *backupInstance {
	incremental := &backupInstance{
		bod: &backupOpDependencies{},
	}

	// copy the old bod connection references
	*incremental.bod = *bi.bod

	// generate a new controller to avoid statefulness
	incremental.bod.renewController(t, ctx)

	incremental.obo = newTestBackupOp(
		t,
		ctx,
		incremental.bod,
		mb,
		bi.obo.Options)

	// link the new instance into the chain before running, so close() can
	// still release it even if the run below fails the test.
	bi.incremental = incremental

	err := incremental.obo.Run(ctx)
	require.NoError(t, err, clues.ToCore(err))
	require.NotEmpty(t, incremental.obo.Results, "the incremental backup had non-zero results")
	require.NotEmpty(t, incremental.obo.Results.BackupID, "the incremental backup generated an ID")

	return incremental
}
// checkBackup asserts the post-run invariants of a completed backup
// operation: acceptable status, non-zero read/write counts, a clean error
// state, and exactly one start/end event pair on the mock event bus.
func checkBackup(
	t *testing.T,
	obo operations.BackupOperation,
	mb *evmock.Bus,
	acceptNoData bool,
) {
	// a NoData completion is acceptable only when the caller opts in.
	expectStatus := []operations.OpStatus{operations.Completed}
	if acceptNoData {
		expectStatus = append(expectStatus, operations.NoData)
	}

	require.Contains(
		t,
		expectStatus,
		obo.Status,
		"backup doesn't match expectation, wanted any of %v, got %s",
		expectStatus,
		obo.Status)

	// the run must have read, written, and uploaded at least one item/byte.
	require.Less(t, 0, obo.Results.ItemsWritten)
	assert.Less(t, 0, obo.Results.ItemsRead, "count of items read")
	assert.Less(t, int64(0), obo.Results.BytesRead, "bytes read")
	assert.Less(t, int64(0), obo.Results.BytesUploaded, "bytes uploaded")
	assert.Equal(t, 1, obo.Results.ResourceOwners, "count of resource owners")
	assert.NoErrorf(
		t,
		obo.Errors.Failure(),
		"incremental non-recoverable error %+v",
		clues.ToCore(obo.Errors.Failure()))
	assert.Empty(t, obo.Errors.Recovered(), "incremental recoverable/iteration errors")

	// exactly one start and one end event should have fired, with the
	// start event carrying the backup ID produced by the run.
	assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")
	assert.Equal(t, 1, mb.TimesCalled[events.BackupEnd], "backup-end events")
	assert.Equal(t,
		mb.CalledWith[events.BackupStart][0][events.BackupID],
		obo.Results.BackupID, "incremental pre-run backupID event")
}
// checkIncrementalBackup asserts the post-run invariants of an incremental
// backup: a clean error state and exactly one start/end event pair, with
// the start event carrying the run's backup ID. Item-count comparisons
// are intentionally disabled (see FIXME below).
func checkIncrementalBackup(
	t *testing.T,
	obo operations.BackupOperation,
	mb *evmock.Bus,
) {
	assert.NoError(t, obo.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(obo.Errors.Failure()))
	assert.Empty(t, obo.Errors.Recovered(), "incremental recoverable/iteration errors")
	assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "incremental backup-start events")
	assert.Equal(t, 1, mb.TimesCalled[events.BackupEnd], "incremental backup-end events")

	// FIXME: commented tests are flaky due to delta calls retaining data that is
	// out of scope of the test data.
	// we need to find a better way to make isolated assertions here.
	// The addition of the deeTD package gives us enough coverage to comment
	// out the tests for now and look to their improvement later.

	// do some additional checks to ensure the incremental dealt with fewer items.
	// +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
	// if !toggles.DisableDelta {
	// 	assert.Equal(t, test.deltaItemsRead+4, incBO.Results.ItemsRead, "incremental items read")
	// 	assert.Equal(t, test.deltaItemsWritten+4, incBO.Results.ItemsWritten, "incremental items written")
	// } else {
	// 	assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
	// 	assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
	// }
	// assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")

	assert.Equal(t,
		mb.CalledWith[events.BackupStart][0][events.BackupID],
		obo.Results.BackupID, "incremental pre-run backupID event")
}
// ---------------------------------------------------------------------------
// initialization and dependencies
// ---------------------------------------------------------------------------
// backupOpDependencies collects all clients and connections required to
// run a backup operation, so they can be constructed, shared across runs,
// and torn down as a unit.
type backupOpDependencies struct {
	acct account.Account
	ctrl *m365.Controller
	kms  *kopia.ModelStore
	kw   *kopia.Wrapper
	sel  selectors.Selector
	sss  streamstore.Streamer
	st   storage.Storage
	sw   *store.Wrapper

	// closer releases the underlying kopia connection; assigned in
	// prepNewTestBackupOp.
	closer func()
}
// prepNewTestBackupOp generates all clients required to run a backup operation,
// returning both a backup operation created with those clients, as well as
// the clients themselves.
func prepNewTestBackupOp(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	bus events.Eventer,
	sel selectors.Selector,
	opts control.Options,
	backupVersion int, // NOTE(review): not referenced in this body — confirm intent
) *backupInstance {
	bod := &backupOpDependencies{
		acct: tconfig.NewM365Account(t),
		st:   storeTD.NewPrefixedS3Storage(t),
	}

	k := kopia.NewConn(bod.st)

	err := k.Initialize(ctx, repository.Options{})
	require.NoError(t, err, clues.ToCore(err))

	// if any setup step below errs, the deferred check releases whatever
	// was built so far before failing the test. The require calls trigger
	// this via the non-nil err captured by the closure.
	defer func() {
		if err != nil {
			bod.close(t, ctx)
			t.FailNow()
		}
	}()

	// kopiaRef comes with a count of 1 and Wrapper bumps it again,
	// so we're safe to close here.
	bod.closer = func() {
		err := k.Close(ctx)
		assert.NoErrorf(t, err, "k close: %+v", clues.ToCore(err))
	}

	bod.kw, err = kopia.NewWrapper(k)
	require.NoError(t, err, clues.ToCore(err))

	bod.kms, err = kopia.NewModelStore(k)
	require.NoError(t, err, clues.ToCore(err))

	bod.sw = store.NewKopiaStore(bod.kms)

	// sharepoint selectors operate on site resources; all others on users.
	connectorResource := resource.Users
	if sel.Service == selectors.ServiceSharePoint {
		connectorResource = resource.Sites
	}

	bod.ctrl, bod.sel = ControllerWithSelector(
		t,
		ctx,
		bod.acct,
		connectorResource,
		sel,
		nil,
		bod.close)

	obo := newTestBackupOp(
		t,
		ctx,
		bod,
		bus,
		opts)

	bod.sss = streamstore.NewStreamer(
		bod.kw,
		bod.acct.ID(),
		bod.sel.PathService())

	return &backupInstance{
		obo: obo,
		bod: bod,
	}
}
// close releases all clients and connections held by the dependency set.
// Each closer runs independently so that one failure doesn't prevent the
// others from releasing their resources.
//
// Bugfix: the kms branch previously closed bod.kw a second time instead
// of the model store, leaking the kms connection.
func (bod *backupOpDependencies) close(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
) {
	// guard against a partially-initialized bod (closer is only assigned
	// during prepNewTestBackupOp).
	if bod.closer != nil {
		bod.closer()
	}

	if bod.kw != nil {
		err := bod.kw.Close(ctx)
		assert.NoErrorf(t, err, "kw close: %+v", clues.ToCore(err))
	}

	if bod.kms != nil {
		err := bod.kms.Close(ctx)
		assert.NoErrorf(t, err, "kms close: %+v", clues.ToCore(err))
	}
}
// renewController generates a new controller and replaces bod.ctrl with
// that instance. Useful for clearing controller state between runs.
func (bod *backupOpDependencies) renewController(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
) {
	svc := bod.sel.PathService()

	// sharepoint paths operate on site resources; all others on users.
	rc := resource.Users
	if svc == path.SharePointService {
		rc = resource.Sites
	}

	ctrl, err := m365.NewController(
		ctx,
		bod.acct,
		rc,
		svc,
		control.Defaults())
	require.NoError(t, err, clues.ToCore(err))

	bod.ctrl = ctrl
}
// newTestBackupOp accepts the clients required to compose a backup operation, plus
// any other metadata, and uses them to generate a new backup operation. This
// allows backup chains to utilize the same temp directory and configuration
// details. On construction failure the dependencies are closed and the
// test is terminated.
func newTestBackupOp(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	bod *backupOpDependencies,
	bus events.Eventer,
	opts control.Options,
) *operations.BackupOperation {
	// the controller resolves the selector's owner ID to its display name
	// through this cache.
	lookup := idname.NewCache(map[string]string{bod.sel.ID(): bod.sel.Name()})
	bod.ctrl.IDNameLookup = lookup

	bo, err := operations.NewBackupOperation(
		ctx,
		opts,
		bod.kw,
		bod.sw,
		bod.ctrl,
		bod.acct,
		bod.sel,
		bod.sel,
		bus)
	if err != nil {
		assert.NoError(t, err, clues.ToCore(err))
		bod.close(t, ctx)
		t.FailNow()
	}

	return &bo
}
// checkBackupIsInManifests asserts that, for each category, the backup
// operation's ID appears among the merge-base snapshot manifests returned
// by the kopia base finder for that (owner, service, category) reason.
//
// Fix: removed a leftover debug fmt.Printf that dumped reasons/tags to
// stdout on every subtest.
func checkBackupIsInManifests(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	bod *backupOpDependencies,
	bo *operations.BackupOperation,
	sel selectors.Selector,
	resourceOwner string,
	categories ...path.CategoryType,
) {
	for _, category := range categories {
		t.Run("backup_in_manifests_"+category.String(), func(t *testing.T) {
			var (
				reasons = []kopia.Reason{
					{
						ResourceOwner: resourceOwner,
						Service:       sel.PathService(),
						Category:      category,
					},
				}
				tags  = map[string]string{kopia.TagBackupCategory: ""}
				found bool
			)

			bf, err := bod.kw.NewBaseFinder(bod.sw)
			require.NoError(t, err, clues.ToCore(err))

			mans := bf.FindBases(ctx, reasons, tags)
			mmb := mans.MergeBases()
			require.NotEmpty(t, mmb, "should find at least one merge base")

			t.Log("Backup IDs from merge bases:")

			// scan each merge base's manifest for the backup ID tag until we
			// find the one produced by this operation.
			for _, man := range mmb {
				bID, ok := man.GetTag(kopia.TagBackupID)
				if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) {
					continue
				}

				t.Log("-", bID)

				if bID == string(bo.Results.BackupID) {
					found = true
					break
				}
			}

			assert.True(t, found, "backup %q retrieved by previous snapshot manifest", bo.Results.BackupID)
		})
	}
}
// checkMetadataFilesExist verifies that, for each category, the expected
// metadata files were persisted in the snapshot referenced by the backup
// model, and that every restored metadata item is non-empty.
func checkMetadataFilesExist(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	backupID model.StableID,
	bod *backupOpDependencies,
	tenant, resourceOwner string,
	service path.ServiceType,
	filesByCat map[path.CategoryType][]string,
) {
	for category, files := range filesByCat {
		t.Run("metadata_files_exist_"+category.String(), func(t *testing.T) {
			// fetch the backup model to learn the snapshot ID to restore from.
			bup := &backup.Backup{}

			err := bod.kms.Get(ctx, model.BackupSchema, backupID, bup)
			if !assert.NoError(t, err, clues.ToCore(err)) {
				return
			}

			paths := []path.RestorePaths{}
			pathsByRef := map[string][]string{}

			// build a restore path for every expected metadata file, and
			// index the file names by their parent dir's ShortRef for the
			// per-collection comparison below.
			for _, fName := range files {
				p, err := path.Builder{}.
					Append(fName).
					ToServiceCategoryMetadataPath(tenant, resourceOwner, service, category, true)
				if !assert.NoError(t, err, "bad metadata path", clues.ToCore(err)) {
					continue
				}

				dir, err := p.Dir()
				if !assert.NoError(t, err, "parent path", clues.ToCore(err)) {
					continue
				}

				paths = append(
					paths,
					path.RestorePaths{StoragePath: p, RestorePath: dir})
				pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName)
			}

			cols, err := bod.kw.ProduceRestoreCollections(
				ctx,
				bup.SnapshotID,
				paths,
				nil,
				fault.New(true))
			assert.NoError(t, err, clues.ToCore(err))

			// every restored item must expose a size and be non-empty, and
			// each collection must contain exactly the expected file names.
			for _, col := range cols {
				itemNames := []string{}

				for item := range col.Items(ctx, fault.New(true)) {
					assert.Implements(t, (*data.StreamSize)(nil), item)

					s := item.(data.StreamSize)
					assert.Greaterf(
						t,
						s.Size(),
						int64(0),
						"empty metadata file: %s/%s",
						col.FullPath(),
						item.UUID(),
					)

					itemNames = append(itemNames, item.UUID())
				}

				assert.ElementsMatchf(
					t,
					pathsByRef[col.FullPath().ShortRef()],
					itemNames,
					"collection %s missing expected files",
					col.FullPath(),
				)
			}
		})
	}
}

View File

@ -8,7 +8,6 @@ import (
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/users"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"golang.org/x/exp/maps"
@ -16,7 +15,6 @@ import (
"github.com/alcionai/corso/src/internal/common/dttm"
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/events"
evmock "github.com/alcionai/corso/src/internal/events/mock"
"github.com/alcionai/corso/src/internal/m365/exchange"
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
@ -34,64 +32,81 @@ import (
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
)
type ExchangeBackupIntgSuite struct {
type ExchangeIntgSuite struct {
tester.Suite
its intgTesterSetup
// the goal of backupInstances is to run a single backup at the start of
// the suite, and re-use that backup throughout the rest of the suite.
bi *backupInstance
}
func TestExchangeBackupIntgSuite(t *testing.T) {
suite.Run(t, &ExchangeBackupIntgSuite{
func TestExchangeIntgSuite(t *testing.T) {
suite.Run(t, &ExchangeIntgSuite{
Suite: tester.NewIntegrationSuite(
t,
[][]string{tconfig.M365AcctCredEnvs, storeTD.AWSStorageCredEnvs}),
})
}
func (suite *ExchangeBackupIntgSuite) SetupSuite() {
suite.its = newIntegrationTesterSetup(suite.T())
func (suite *ExchangeIntgSuite) SetupSuite() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
suite.its = newIntegrationTesterSetup(t)
sel := selectors.NewExchangeBackup([]string{suite.its.userID})
sel.Include(
sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()),
sel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()),
sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
sel.DiscreteOwner = suite.its.userID
var (
mb = evmock.NewBus()
opts = control.Defaults()
)
suite.bi = prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
suite.bi.runAndCheckBackup(t, ctx, mb, false)
}
func (suite *ExchangeIntgSuite) TeardownSuite() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
if suite.bi != nil {
suite.bi.close(t, ctx)
}
}
// TestBackup_Run ensures that Integration Testing works
// for the following scopes: Contacts, Events, and Mail
func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
func (suite *ExchangeIntgSuite) TestBackup_Run_exchange() {
tests := []struct {
name string
selector func() *selectors.ExchangeBackup
category path.CategoryType
metadataFiles []string
}{
{
name: "Mail",
selector: func() *selectors.ExchangeBackup {
sel := selectors.NewExchangeBackup([]string{suite.its.userID})
sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
sel.DiscreteOwner = suite.its.userID
return sel
},
name: "Mail",
category: path.EmailCategory,
metadataFiles: exchange.MetadataFileNames(path.EmailCategory),
},
{
name: "Contacts",
selector: func() *selectors.ExchangeBackup {
sel := selectors.NewExchangeBackup([]string{suite.its.userID})
sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()))
return sel
},
category: path.ContactsCategory,
metadataFiles: exchange.MetadataFileNames(path.ContactsCategory),
},
{
name: "Calendar Events",
selector: func() *selectors.ExchangeBackup {
sel := selectors.NewExchangeBackup([]string{suite.its.userID})
sel.Include(sel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()))
return sel
},
category: path.EventsCategory,
metadataFiles: exchange.MetadataFileNames(path.EventsCategory),
},
// {
// name: "Contacts",
// category: path.ContactsCategory,
// metadataFiles: exchange.MetadataFileNames(path.ContactsCategory),
// },
// {
// name: "Events",
// category: path.EventsCategory,
// metadataFiles: exchange.MetadataFileNames(path.EventsCategory),
// },
}
for _, test := range tests {
suite.Run(test.name, func() {
@ -101,50 +116,45 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
defer flush()
var (
mb = evmock.NewBus()
sel = test.selector().Selector
opts = control.Defaults()
bod = suite.bi.bod
sel = suite.bi.bod.sel
obo = suite.bi.obo
userID = suite.its.userID
whatSet = deeTD.CategoryFromRepoRef
)
bo, bod := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
defer bod.close(t, ctx)
fmt.Printf("\n-----\n%s BUPs\n", test.name)
ibii := suite.bi
for ibii != nil {
fmt.Println(ibii.obo.Results.BackupID)
ibii = ibii.incremental
}
sel = bod.sel
fmt.Printf("-----\n")
userID := sel.ID()
m365, err := bod.acct.M365Config()
require.NoError(t, err, clues.ToCore(err))
// run the tests
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&bo,
bod,
obo,
sel,
userID,
test.category)
checkMetadataFilesExist(
t,
ctx,
bo.Results.BackupID,
bod.kw,
bod.kms,
m365.AzureTenantID,
obo.Results.BackupID,
bod,
suite.its.acct.ID(),
userID,
path.ExchangeService,
map[path.CategoryType][]string{test.category: test.metadataFiles})
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bo.Results.BackupID,
obo.Results.BackupID,
bod.acct.ID(),
userID,
sel,
path.ExchangeService,
whatSet,
bod.kms,
@ -152,82 +162,82 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
deeTD.CheckBackupDetails(
t,
ctx,
bo.Results.BackupID,
obo.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
expectDeets,
false)
// Basic, happy path incremental test. No changes are dictated or expected.
// This only tests that an incremental backup is runnable at all, and that it
// produces fewer results than the last backup.
var (
incMB = evmock.NewBus()
incBO = newTestBackupOp(
t,
ctx,
bod,
incMB,
opts)
)
runAndCheckBackup(t, ctx, &incBO, incMB, true)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&incBO,
sel,
userID,
test.category)
checkMetadataFilesExist(
t,
ctx,
incBO.Results.BackupID,
bod.kw,
bod.kms,
m365.AzureTenantID,
userID,
path.ExchangeService,
map[path.CategoryType][]string{test.category: test.metadataFiles})
deeTD.CheckBackupDetails(
t,
ctx,
incBO.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
expectDeets,
false)
// do some additional checks to ensure the incremental dealt with fewer items.
assert.Greater(t, bo.Results.ItemsWritten, incBO.Results.ItemsWritten, "incremental items written")
assert.Greater(t, bo.Results.ItemsRead, incBO.Results.ItemsRead, "incremental items read")
assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read")
assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded")
assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "count incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
incBO.Results.BackupID, "incremental backupID pre-declaration")
})
}
// // Basic, happy path incremental test. No changes are dictated or expected.
// // This only tests that an incremental backup is runnable at all, and that it
// // produces fewer results than the last backup.
// var (
// incMB = evmock.NewBus()
// incBO = newTestBackupOp(
// t,
// ctx,
// bod,
// incMB,
// opts)
// )
// runAndCheckBackup(t, ctx, &incBO, incMB, true)
// checkBackupIsInManifests(
// t,
// ctx,
// bod.kw,
// bod.sw,
// &incBO,
// sel,
// userID,
// test.category)
// checkMetadataFilesExist(
// t,
// ctx,
// incBO.Results.BackupID,
// bod.kw,
// bod.kms,
// m365.AzureTenantID,
// userID,
// path.ExchangeService,
// map[path.CategoryType][]string{test.category: test.metadataFiles})
// deeTD.CheckBackupDetails(
// t,
// ctx,
// incBO.Results.BackupID,
// whatSet,
// bod.kms,
// bod.sss,
// expectDeets,
// false)
// // do some additional checks to ensure the incremental dealt with fewer items.
// assert.Greater(t, bo.Results.ItemsWritten, incBO.Results.ItemsWritten, "incremental items written")
// assert.Greater(t, bo.Results.ItemsRead, incBO.Results.ItemsRead, "incremental items read")
// assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read")
// assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded")
// assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
// assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
// assert.Empty(t, incBO.Errors.Recovered(), "count incremental recoverable/iteration errors")
// assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
// assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
// assert.Equal(t,
// incMB.CalledWith[events.BackupStart][0][events.BackupID],
// incBO.Results.BackupID, "incremental backupID pre-declaration")
}
func (suite *ExchangeBackupIntgSuite) TestBackup_Run_incrementalExchange() {
func (suite *ExchangeIntgSuite) TestBackup_Run_incrementalExchange() {
testExchangeContinuousBackups(suite, control.Toggles{})
}
func (suite *ExchangeBackupIntgSuite) TestBackup_Run_incrementalNonDeltaExchange() {
func (suite *ExchangeIntgSuite) TestBackup_Run_incrementalNonDeltaExchange() {
testExchangeContinuousBackups(suite, control.Toggles{DisableDelta: true})
}
func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles control.Toggles) {
func testExchangeContinuousBackups(suite *ExchangeIntgSuite, toggles control.Toggles) {
t := suite.T()
ctx, flush := tester.NewContext(t)
@ -346,6 +356,9 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
// populate initial test data
for category, gen := range dataset {
for destName := range gen.dests {
rc := control.DefaultRestoreConfig("")
rc.Location = destName
deets := generateContainerOfItems(
t,
ctx,
@ -356,7 +369,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
creds.AzureTenantID,
uidn.ID(),
"",
destName,
rc,
2,
version.Backup,
gen.dbf)
@ -380,11 +393,10 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
}
}
bo, bod := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
defer bod.close(t, ctx)
// run the initial backup
runAndCheckBackup(t, ctx, &bo, mb, false)
// run the initial incremental backup
ibi := suite.bi.runAndCheckIncrementalBackup(t, ctx, mb)
obo := ibi.obo
bod := ibi.bod
rrPfx, err := path.ServicePrefix(acct.ID(), uidn.ID(), service, path.EmailCategory)
require.NoError(t, err, clues.ToCore(err))
@ -394,9 +406,9 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
bupDeets, _ := deeTD.GetDeetsInBackup(
t,
ctx,
bo.Results.BackupID,
obo.Results.BackupID,
acct.ID(),
uidn.ID(),
uidn,
service,
whatSet,
bod.kms,
@ -467,7 +479,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
deeTD.CheckBackupDetails(
t,
ctx,
bo.Results.BackupID,
obo.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
@ -553,6 +565,9 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
name: "add a new folder",
updateUserData: func(t *testing.T, ctx context.Context) {
for category, gen := range dataset {
rc := control.DefaultRestoreConfig("")
rc.Location = container3
deets := generateContainerOfItems(
t,
ctx,
@ -560,7 +575,10 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
service,
category,
selectors.NewExchangeRestore([]string{uidn.ID()}).Selector,
creds.AzureTenantID, suite.its.userID, "", container3,
creds.AzureTenantID,
suite.its.userID,
"",
rc,
2,
version.Backup,
gen.dbf)
@ -763,15 +781,17 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
for _, test := range table {
suite.Run(test.name, func() {
var (
t = suite.T()
incMB = evmock.NewBus()
atid = creds.AzureTenantID
t = suite.T()
mb = evmock.NewBus()
atid = creds.AzureTenantID
ctx, flush = tester.WithContext(t, ctx)
)
ctx, flush := tester.WithContext(t, ctx)
defer flush()
incBO := newTestBackupOp(t, ctx, bod, incMB, opts)
ibi = ibi.runAndCheckIncrementalBackup(t, ctx, mb)
obo := ibi.obo
bod := ibi.bod
suite.Run("PreTestSetup", func() {
t := suite.T()
@ -782,17 +802,16 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
test.updateUserData(t, ctx)
})
err := incBO.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
bupID := obo.Results.BackupID
bupID := incBO.Results.BackupID
err := obo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&incBO,
bod,
obo,
sels,
uidn.ID(),
maps.Keys(categories)...)
@ -800,8 +819,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
t,
ctx,
bupID,
bod.kw,
bod.kms,
bod,
atid,
uidn.ID(),
service,
@ -815,30 +833,6 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
bod.sss,
expectDeets,
true)
// FIXME: commented tests are flaky due to delta calls retaining data that is
// out of scope of the test data.
// we need to find a better way to make isolated assertions here.
// The addition of the deeTD package gives us enough coverage to comment
// out the tests for now and look to their improvement later.
// do some additional checks to ensure the incremental dealt with fewer items.
// +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
// if !toggles.DisableDelta {
// assert.Equal(t, test.deltaItemsRead+4, incBO.Results.ItemsRead, "incremental items read")
// assert.Equal(t, test.deltaItemsWritten+4, incBO.Results.ItemsWritten, "incremental items written")
// } else {
// assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
// assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
// }
// assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
bupID, "incremental backupID pre-declaration")
})
}
}

View File

@ -2,7 +2,6 @@ package test_test
import (
"context"
"fmt"
"testing"
"time"
@ -15,24 +14,16 @@ import (
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/events"
evmock "github.com/alcionai/corso/src/internal/events/mock"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/m365"
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
"github.com/alcionai/corso/src/internal/m365/graph"
odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
"github.com/alcionai/corso/src/internal/m365/resource"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/internal/operations"
"github.com/alcionai/corso/src/internal/streamstore"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/extensions"
"github.com/alcionai/corso/src/pkg/fault"
@ -40,9 +31,6 @@ import (
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/services/m365/api"
"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
"github.com/alcionai/corso/src/pkg/storage"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
"github.com/alcionai/corso/src/pkg/store"
)
// Does not use the tester.DefaultTestRestoreDestination syntax as some of these
@ -50,308 +38,69 @@ import (
// they get clearly selected without accidental overlap.
const incrementalsDestContainerPrefix = "incrementals_ci_"
type backupOpDependencies struct {
acct account.Account
ctrl *m365.Controller
kms *kopia.ModelStore
kw *kopia.Wrapper
sel selectors.Selector
sss streamstore.Streamer
st storage.Storage
sw *store.Wrapper
// ---------------------------------------------------------------------------
// Suite Setup
// ---------------------------------------------------------------------------
closer func()
type intgTesterSetup struct {
ac api.Client
gockAC api.Client
acct account.Account
userID string
userDriveID string
userDriveRootFolderID string
siteID string
siteDriveID string
siteDriveRootFolderID string
}
func (bod *backupOpDependencies) close(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
) {
bod.closer()
func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
its := intgTesterSetup{}
if bod.kw != nil {
err := bod.kw.Close(ctx)
assert.NoErrorf(t, err, "kw close: %+v", clues.ToCore(err))
}
ctx, flush := tester.NewContext(t)
defer flush()
if bod.kms != nil {
err := bod.kw.Close(ctx)
assert.NoErrorf(t, err, "kms close: %+v", clues.ToCore(err))
}
}
graph.InitializeConcurrencyLimiter(ctx, true, 4)
// prepNewTestBackupOp generates all clients required to run a backup operation,
// returning both a backup operation created with those clients, as well as
// the clients themselves.
func prepNewTestBackupOp(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
bus events.Eventer,
sel selectors.Selector,
opts control.Options,
backupVersion int,
) (
operations.BackupOperation,
*backupOpDependencies,
) {
bod := &backupOpDependencies{
acct: tconfig.NewM365Account(t),
st: storeTD.NewPrefixedS3Storage(t),
}
k := kopia.NewConn(bod.st)
err := k.Initialize(ctx, repository.Options{})
its.acct = tconfig.NewM365Account(t)
creds, err := its.acct.M365Config()
require.NoError(t, err, clues.ToCore(err))
defer func() {
if err != nil {
bod.close(t, ctx)
t.FailNow()
}
}()
// kopiaRef comes with a count of 1 and Wrapper bumps it again
// we're so safe to close here.
bod.closer = func() {
err := k.Close(ctx)
assert.NoErrorf(t, err, "k close: %+v", clues.ToCore(err))
}
bod.kw, err = kopia.NewWrapper(k)
if !assert.NoError(t, err, clues.ToCore(err)) {
return operations.BackupOperation{}, nil
}
bod.kms, err = kopia.NewModelStore(k)
if !assert.NoError(t, err, clues.ToCore(err)) {
return operations.BackupOperation{}, nil
}
bod.sw = store.NewKopiaStore(bod.kms)
connectorResource := resource.Users
if sel.Service == selectors.ServiceSharePoint {
connectorResource = resource.Sites
}
bod.ctrl, bod.sel = ControllerWithSelector(
t,
ctx,
bod.acct,
connectorResource,
sel,
nil,
bod.close)
bo := newTestBackupOp(
t,
ctx,
bod,
bus,
opts)
bod.sss = streamstore.NewStreamer(
bod.kw,
bod.acct.ID(),
bod.sel.PathService())
return bo, bod
}
// newTestBackupOp accepts the clients required to compose a backup operation, plus
// any other metadata, and uses them to generate a new backup operation. This
// allows backup chains to utilize the same temp directory and configuration
// details.
func newTestBackupOp(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
bod *backupOpDependencies,
bus events.Eventer,
opts control.Options,
) operations.BackupOperation {
bod.ctrl.IDNameLookup = idname.NewCache(map[string]string{bod.sel.ID(): bod.sel.Name()})
bo, err := operations.NewBackupOperation(
ctx,
opts,
bod.kw,
bod.sw,
bod.ctrl,
bod.acct,
bod.sel,
bod.sel,
bus)
if !assert.NoError(t, err, clues.ToCore(err)) {
bod.close(t, ctx)
t.FailNow()
}
return bo
}
func runAndCheckBackup(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
bo *operations.BackupOperation,
mb *evmock.Bus,
acceptNoData bool,
) {
err := bo.Run(ctx)
its.ac, err = api.NewClient(creds)
require.NoError(t, err, clues.ToCore(err))
require.NotEmpty(t, bo.Results, "the backup had non-zero results")
require.NotEmpty(t, bo.Results.BackupID, "the backup generated an ID")
expectStatus := []operations.OpStatus{operations.Completed}
if acceptNoData {
expectStatus = append(expectStatus, operations.NoData)
}
its.gockAC, err = mock.NewClient(creds)
require.NoError(t, err, clues.ToCore(err))
require.Contains(
t,
expectStatus,
bo.Status,
"backup doesn't match expectation, wanted any of %v, got %s",
expectStatus,
bo.Status)
// user drive
require.Less(t, 0, bo.Results.ItemsWritten)
assert.Less(t, 0, bo.Results.ItemsRead, "count of items read")
assert.Less(t, int64(0), bo.Results.BytesRead, "bytes read")
assert.Less(t, int64(0), bo.Results.BytesUploaded, "bytes uploaded")
assert.Equal(t, 1, bo.Results.ResourceOwners, "count of resource owners")
assert.NoError(t, bo.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
assert.Empty(t, bo.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")
assert.Equal(t, 1, mb.TimesCalled[events.BackupEnd], "backup-end events")
assert.Equal(t,
mb.CalledWith[events.BackupStart][0][events.BackupID],
bo.Results.BackupID, "backupID pre-declaration")
}
its.userID = tconfig.M365UserID(t)
func checkBackupIsInManifests(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
kw *kopia.Wrapper,
sw *store.Wrapper,
bo *operations.BackupOperation,
sel selectors.Selector,
resourceOwner string,
categories ...path.CategoryType,
) {
for _, category := range categories {
t.Run(category.String(), func(t *testing.T) {
var (
reasons = []kopia.Reason{
{
ResourceOwner: resourceOwner,
Service: sel.PathService(),
Category: category,
},
}
tags = map[string]string{kopia.TagBackupCategory: ""}
found bool
)
userDrive, err := its.ac.Users().GetDefaultDrive(ctx, its.userID)
require.NoError(t, err, clues.ToCore(err))
bf, err := kw.NewBaseFinder(sw)
require.NoError(t, err, clues.ToCore(err))
its.userDriveID = ptr.Val(userDrive.GetId())
mans := bf.FindBases(ctx, reasons, tags)
for _, man := range mans.MergeBases() {
bID, ok := man.GetTag(kopia.TagBackupID)
if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) {
continue
}
userDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.userDriveID)
require.NoError(t, err, clues.ToCore(err))
if bID == string(bo.Results.BackupID) {
found = true
break
}
}
its.userDriveRootFolderID = ptr.Val(userDriveRootFolder.GetId())
assert.True(t, found, "backup retrieved by previous snapshot manifest")
})
}
}
its.siteID = tconfig.M365SiteID(t)
func checkMetadataFilesExist(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
backupID model.StableID,
kw *kopia.Wrapper,
ms *kopia.ModelStore,
tenant, resourceOwner string,
service path.ServiceType,
filesByCat map[path.CategoryType][]string,
) {
for category, files := range filesByCat {
t.Run(category.String(), func(t *testing.T) {
bup := &backup.Backup{}
// site
err := ms.Get(ctx, model.BackupSchema, backupID, bup)
if !assert.NoError(t, err, clues.ToCore(err)) {
return
}
siteDrive, err := its.ac.Sites().GetDefaultDrive(ctx, its.siteID)
require.NoError(t, err, clues.ToCore(err))
paths := []path.RestorePaths{}
pathsByRef := map[string][]string{}
its.siteDriveID = ptr.Val(siteDrive.GetId())
for _, fName := range files {
p, err := path.Builder{}.
Append(fName).
ToServiceCategoryMetadataPath(tenant, resourceOwner, service, category, true)
if !assert.NoError(t, err, "bad metadata path", clues.ToCore(err)) {
continue
}
siteDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.siteDriveID)
require.NoError(t, err, clues.ToCore(err))
dir, err := p.Dir()
if !assert.NoError(t, err, "parent path", clues.ToCore(err)) {
continue
}
its.siteDriveRootFolderID = ptr.Val(siteDriveRootFolder.GetId())
paths = append(
paths,
path.RestorePaths{StoragePath: p, RestorePath: dir})
pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName)
}
cols, err := kw.ProduceRestoreCollections(
ctx,
bup.SnapshotID,
paths,
nil,
fault.New(true))
assert.NoError(t, err, clues.ToCore(err))
for _, col := range cols {
itemNames := []string{}
for item := range col.Items(ctx, fault.New(true)) {
assert.Implements(t, (*data.StreamSize)(nil), item)
s := item.(data.StreamSize)
assert.Greaterf(
t,
s.Size(),
int64(0),
"empty metadata file: %s/%s",
col.FullPath(),
item.UUID(),
)
itemNames = append(itemNames, item.UUID())
}
assert.ElementsMatchf(
t,
pathsByRef[col.FullPath().ShortRef()],
itemNames,
"collection %s missing expected files",
col.FullPath(),
)
}
})
}
return its
}
// ---------------------------------------------------------------------------
@ -373,7 +122,8 @@ func generateContainerOfItems(
service path.ServiceType,
cat path.CategoryType,
sel selectors.Selector,
tenantID, resourceOwner, driveID, destFldr string,
tenantID, resourceOwner, driveID string,
rc control.RestoreConfig,
howManyItems int,
backupVersion int,
dbf dataBuilderFunc,
@ -391,11 +141,11 @@ func generateContainerOfItems(
})
}
pathFolders := []string{destFldr}
pathFolders := []string{}
switch service {
case path.OneDriveService, path.SharePointService:
pathFolders = []string{odConsts.DrivesPathDir, driveID, odConsts.RootPathDir, destFldr}
pathFolders = []string{odConsts.DrivesPathDir, driveID, odConsts.RootPathDir}
}
collections := []incrementalCollection{{
@ -404,14 +154,10 @@ func generateContainerOfItems(
items: items,
}}
restoreCfg := control.DefaultRestoreConfig(dttm.SafeForTesting)
restoreCfg.Location = destFldr
dataColls := buildCollections(
t,
service,
tenantID, resourceOwner,
restoreCfg,
collections)
opts := control.Defaults()
@ -421,7 +167,7 @@ func generateContainerOfItems(
ctx,
backupVersion,
sel,
restoreCfg,
rc,
opts,
dataColls,
fault.New(true),
@ -467,7 +213,6 @@ func buildCollections(
t *testing.T,
service path.ServiceType,
tenant, user string,
restoreCfg control.RestoreConfig,
colls []incrementalCollection,
) []data.RestoreCollection {
t.Helper()
@ -475,14 +220,8 @@ func buildCollections(
collections := make([]data.RestoreCollection, 0, len(colls))
for _, c := range colls {
pth := toDataLayerPath(
t,
service,
tenant,
user,
c.category,
c.pathFolders,
false)
pth, err := path.Build(tenant, user, service, c.category, false, c.pathFolders...)
require.NoError(t, err, clues.ToCore(err))
mc := exchMock.NewCollection(pth, pth, len(c.items))
@ -497,38 +236,6 @@ func buildCollections(
return collections
}
// toDataLayerPath builds a fully-formed storage path from the given
// elements for the service and category under test.  Any path-builder
// error fails the test immediately.
func toDataLayerPath(
	t *testing.T,
	service path.ServiceType,
	tenant, resourceOwner string,
	category path.CategoryType,
	elements []string,
	isItem bool,
) path.Path {
	t.Helper()

	pb := path.Builder{}.Append(elements...)

	var (
		result path.Path
		err    error
	)

	switch service {
	case path.ExchangeService:
		result, err = pb.ToDataLayerExchangePathForCategory(tenant, resourceOwner, category, isItem)
	case path.OneDriveService:
		result, err = pb.ToDataLayerOneDrivePath(tenant, resourceOwner, isItem)
	case path.SharePointService:
		result, err = pb.ToDataLayerSharePointPath(tenant, resourceOwner, category, isItem)
	default:
		err = clues.New(fmt.Sprintf("unknown service: %s", service))
	}

	require.NoError(t, err, clues.ToCore(err))

	return result
}
// A QoL builder for live instances that updates
// the selector's owner id and name in the process
// to help avoid gotchas.
@ -564,70 +271,6 @@ func ControllerWithSelector(
return ctrl, sel
}
// ---------------------------------------------------------------------------
// Suite Setup
// ---------------------------------------------------------------------------
// intgTesterSetup bundles the api clients and m365 resource identifiers
// (a test user and test site, plus their default drives) that integration
// suites resolve once in newIntegrationTesterSetup and share across tests.
type intgTesterSetup struct {
ac api.Client // live graph api client, built from the test account creds
gockAC api.Client // client built by the mock package; presumably gock-intercepted — confirm against mock.NewClient
userID string // test user id, from tconfig.M365UserID
userDriveID string // id of the user's default drive
userDriveRootFolderID string // id of the root folder in the user's default drive
siteID string // test site id, from tconfig.M365SiteID
siteDriveID string // id of the site's default drive
siteDriveRootFolderID string // id of the root folder in the site's default drive
}
// newIntegrationTesterSetup constructs the shared integration-test fixture:
// it builds live and mock api clients from the test account, then resolves
// the test user's and test site's default drive and root-folder IDs via
// live graph calls.  Any failure aborts the test immediately.
func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
its := intgTesterSetup{}
ctx, flush := tester.NewContext(t)
defer flush()
// cap concurrent graph requests for the duration of the suite.
graph.InitializeConcurrencyLimiter(ctx, true, 4)
a := tconfig.NewM365Account(t)
creds, err := a.M365Config()
require.NoError(t, err, clues.ToCore(err))
its.ac, err = api.NewClient(creds)
require.NoError(t, err, clues.ToCore(err))
its.gockAC, err = mock.NewClient(creds)
require.NoError(t, err, clues.ToCore(err))
// user drive: look up the test user's default drive and its root folder.
its.userID = tconfig.M365UserID(t)
userDrive, err := its.ac.Users().GetDefaultDrive(ctx, its.userID)
require.NoError(t, err, clues.ToCore(err))
its.userDriveID = ptr.Val(userDrive.GetId())
userDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.userDriveID)
require.NoError(t, err, clues.ToCore(err))
its.userDriveRootFolderID = ptr.Val(userDriveRootFolder.GetId())
its.siteID = tconfig.M365SiteID(t)
// site: look up the test site's default drive and its root folder.
siteDrive, err := its.ac.Sites().GetDefaultDrive(ctx, its.siteID)
require.NoError(t, err, clues.ToCore(err))
its.siteDriveID = ptr.Val(siteDrive.GetId())
siteDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.siteDriveID)
require.NoError(t, err, clues.ToCore(err))
its.siteDriveRootFolderID = ptr.Val(siteDriveRootFolder.GetId())
return its
}
func getTestExtensionFactories() []extensions.CreateItemExtensioner {
return []extensions.CreateItemExtensioner{
&extensions.MockItemExtensionFactory{},

View File

@ -43,6 +43,9 @@ import (
type OneDriveBackupIntgSuite struct {
tester.Suite
its intgTesterSetup
// the goal of backupInstances is to run a single backup at the start of
// the suite, and re-use that backup throughout the rest of the suite.
bi *backupInstance
}
func TestOneDriveBackupIntgSuite(t *testing.T) {
@ -54,7 +57,24 @@ func TestOneDriveBackupIntgSuite(t *testing.T) {
}
// SetupSuite resolves the shared integration fixture and runs a single
// OneDrive backup whose results are reused by the rest of the suite.
func (suite *OneDriveBackupIntgSuite) SetupSuite() {
	t := suite.T()
	ctx, flush := tester.NewContext(t)
	defer flush()

	suite.its = newIntegrationTesterSetup(t)

	// This suite backs up the test user's OneDrive, so the selector's
	// resource owner must be the user ID.  (Previously this mistakenly
	// used suite.its.siteID — a SharePoint resource — while DiscreteOwner
	// was set to the user ID, leaving the two inconsistent.)
	sel := selectors.NewOneDriveBackup([]string{suite.its.userID})
	sel.Include(selTD.OneDriveBackupFolderScope(sel))
	sel.DiscreteOwner = suite.its.userID

	var (
		mb   = evmock.NewBus()
		opts = control.Defaults()
	)

	suite.bi = prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
	suite.bi.runAndCheckBackup(t, ctx, mb, false)
}
func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
@ -64,39 +84,37 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
defer flush()
var (
tenID = tconfig.M365TenantID(t)
mb = evmock.NewBus()
userID = tconfig.SecondaryM365UserID(t)
osel = selectors.NewOneDriveBackup([]string{userID})
ws = deeTD.DriveIDFromRepoRef
svc = path.OneDriveService
opts = control.Defaults()
bod = suite.bi.bod
sel = suite.bi.bod.sel
obo = suite.bi.obo
siteID = suite.its.siteID
whatSet = deeTD.DriveIDFromRepoRef
)
osel.Include(selTD.OneDriveBackupFolderScope(osel))
bo, bod := prepNewTestBackupOp(t, ctx, mb, osel.Selector, opts, version.Backup)
defer bod.close(t, ctx)
runAndCheckBackup(t, ctx, &bo, mb, false)
bID := bo.Results.BackupID
checkBackupIsInManifests(
t,
ctx,
bod,
obo,
sel,
siteID,
path.LibrariesCategory)
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bID,
tenID,
bod.sel.ID(),
svc,
ws,
obo.Results.BackupID,
bod.acct.ID(),
sel,
path.OneDriveService,
whatSet,
bod.kms,
bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bID,
ws,
obo.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
expectDeets,
@ -135,6 +153,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
runDriveIncrementalTest(
suite,
suite.bi,
suite.its.userID,
suite.its.userID,
resource.Users,
@ -148,6 +167,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
func runDriveIncrementalTest(
suite tester.Suite,
bi *backupInstance,
owner, permissionsUser string,
rc resource.Category,
service path.ServiceType,
@ -164,7 +184,6 @@ func runDriveIncrementalTest(
var (
acct = tconfig.NewM365Account(t)
opts = control.Defaults()
mb = evmock.NewBus()
ws = deeTD.DriveIDFromRepoRef
@ -223,6 +242,9 @@ func runDriveIncrementalTest(
// through the changes. This should be enough to cover most delta
// actions.
for _, destName := range genDests {
rc := control.DefaultRestoreConfig("")
rc.Location = destName
deets := generateContainerOfItems(
t,
ctx,
@ -230,7 +252,10 @@ func runDriveIncrementalTest(
service,
category,
sel,
atid, roidn.ID(), driveID, destName,
atid,
roidn.ID(),
driveID,
rc,
2,
// Use an old backup version so we don't need metadata files.
0,
@ -260,20 +285,19 @@ func runDriveIncrementalTest(
containerIDs[destName] = ptr.Val(resp.GetId())
}
bo, bod := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
defer bod.close(t, ctx)
// run the initial incremental backup
ibi := bi.runAndCheckIncrementalBackup(t, ctx, mb)
obo := ibi.obo
bod := ibi.bod
sel = bod.sel
// run the initial backup
runAndCheckBackup(t, ctx, &bo, mb, false)
// precheck to ensure the expectedDeets are correct.
// if we fail here, the expectedDeets were populated incorrectly.
deeTD.CheckBackupDetails(
t,
ctx,
bo.Results.BackupID,
obo.Results.BackupID,
ws,
bod.kms,
bod.sss,
@ -568,6 +592,9 @@ func runDriveIncrementalTest(
{
name: "add a new folder",
updateFiles: func(t *testing.T, ctx context.Context) {
rc := control.DefaultRestoreConfig("")
rc.Location = container3
generateContainerOfItems(
t,
ctx,
@ -575,7 +602,10 @@ func runDriveIncrementalTest(
service,
category,
sel,
atid, roidn.ID(), driveID, container3,
atid,
roidn.ID(),
driveID,
rc,
2,
0,
fileDBF)
@ -600,25 +630,23 @@ func runDriveIncrementalTest(
}
for _, test := range table {
suite.Run(test.name, func() {
cleanCtrl, err := m365.NewController(ctx, acct, rc, sel.PathService(), control.Defaults())
require.NoError(t, err, clues.ToCore(err))
// cleanCtrl, err := m365.NewController(ctx, acct, rc, sel.PathService(), control.Defaults())
// require.NoError(t, err, clues.ToCore(err))
bod.ctrl = cleanCtrl
// bod.ctrl = cleanCtrl
var (
t = suite.T()
incMB = evmock.NewBus()
incBO = newTestBackupOp(
t,
ctx,
bod,
incMB,
opts)
t = suite.T()
mb = evmock.NewBus()
)
ctx, flush := tester.WithContext(t, ctx)
defer flush()
ibi = ibi.runAndCheckIncrementalBackup(t, ctx, mb)
obo := ibi.obo
bod := ibi.bod
suite.Run("PreTestSetup", func() {
t := suite.T()
@ -628,17 +656,16 @@ func runDriveIncrementalTest(
test.updateFiles(t, ctx)
})
err = incBO.Run(ctx)
err = obo.Run(ctx)
require.NoError(t, err, clues.ToCore(err))
bupID := incBO.Results.BackupID
bupID := obo.Results.BackupID
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&incBO,
bod,
obo,
sel,
roidn.ID(),
maps.Keys(categories)...)
@ -646,8 +673,7 @@ func runDriveIncrementalTest(
t,
ctx,
bupID,
bod.kw,
bod.kms,
bod,
atid,
roidn.ID(),
service,
@ -679,17 +705,9 @@ func runDriveIncrementalTest(
assertReadWrite = assert.LessOrEqual
}
assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written")
assertReadWrite(t, expectNonMetaWrites, incBO.Results.NonMetaItemsWritten, "incremental non-meta items written")
assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
bupID, "incremental backupID pre-declaration")
assertReadWrite(t, expectWrites, obo.Results.ItemsWritten, "incremental items written")
assertReadWrite(t, expectNonMetaWrites, obo.Results.NonMetaItemsWritten, "incremental non-meta items written")
assertReadWrite(t, expectReads, obo.Results.ItemsRead, "incremental items read")
})
}
}
@ -730,26 +748,30 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
oldsel := selectors.NewOneDriveBackup([]string{uname})
oldsel.Include(selTD.OneDriveBackupFolderScope(oldsel))
bo, bod := prepNewTestBackupOp(t, ctx, mb, oldsel.Selector, opts, 0)
defer bod.close(t, ctx)
// don't re-use the suite.bi for this case because we need
// to control for the backup version.
bi := prepNewTestBackupOp(t, ctx, mb, oldsel.Selector, opts, 0)
defer bi.close(t, ctx)
obo := bi.obo
bod := bi.bod
sel := bod.sel
// ensure the initial owner uses name in both cases
bo.ResourceOwner = sel.SetDiscreteOwnerIDName(uname, uname)
obo.ResourceOwner = sel.SetDiscreteOwnerIDName(uname, uname)
// required, otherwise we don't run the migration
bo.BackupVersion = version.All8MigrateUserPNToID - 1
obo.BackupVersion = version.All8MigrateUserPNToID - 1
require.Equalf(
t,
bo.ResourceOwner.Name(),
bo.ResourceOwner.ID(),
obo.ResourceOwner.Name(),
obo.ResourceOwner.ID(),
"historical representation of user id [%s] should match pn [%s]",
bo.ResourceOwner.ID(),
bo.ResourceOwner.Name())
obo.ResourceOwner.ID(),
obo.ResourceOwner.Name())
// run the initial backup
runAndCheckBackup(t, ctx, &bo, mb, false)
bi.runAndCheckBackup(t, ctx, mb, false)
newsel := selectors.NewOneDriveBackup([]string{uid})
newsel.Include(selTD.OneDriveBackupFolderScope(newsel))
@ -758,7 +780,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
var (
incMB = evmock.NewBus()
// the incremental backup op should have a proper user ID for the id.
incBO = newTestBackupOp(t, ctx, bod, incMB, opts)
incBO = newTestBackupOp(t, ctx, bi.bod, incMB, opts)
)
require.NotEqualf(
@ -774,9 +796,8 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&incBO,
bod,
obo,
sel,
uid,
maps.Keys(categories)...)
@ -784,8 +805,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
t,
ctx,
incBO.Results.BackupID,
bod.kw,
bod.kms,
bod,
creds.AzureTenantID,
uid,
path.OneDriveService,
@ -806,13 +826,13 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
bid := incBO.Results.BackupID
bup := &backup.Backup{}
err = bod.kms.Get(ctx, model.BackupSchema, bid, bup)
err = bi.bod.kms.Get(ctx, model.BackupSchema, bid, bup)
require.NoError(t, err, clues.ToCore(err))
var (
ssid = bup.StreamStoreID
deets details.Details
ss = streamstore.NewStreamer(bod.kw, creds.AzureTenantID, path.OneDriveService)
ss = streamstore.NewStreamer(bi.bod.kw, creds.AzureTenantID, path.OneDriveService)
)
err = ss.Read(ctx, ssid, streamstore.DetailsReader(details.UnmarshalTo(&deets)), fault.New(true))
@ -836,7 +856,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveExtensions() {
tenID = tconfig.M365TenantID(t)
mb = evmock.NewBus()
userID = tconfig.SecondaryM365UserID(t)
osel = selectors.NewOneDriveBackup([]string{userID})
sel = selectors.NewOneDriveBackup([]string{userID})
ws = deeTD.DriveIDFromRepoRef
svc = path.OneDriveService
opts = control.Defaults()
@ -844,32 +864,44 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveExtensions() {
opts.ItemExtensionFactory = getTestExtensionFactories()
osel.Include(selTD.OneDriveBackupFolderScope(osel))
sel.Include(selTD.OneDriveBackupFolderScope(sel))
bo, bod := prepNewTestBackupOp(t, ctx, mb, osel.Selector, opts, version.Backup)
defer bod.close(t, ctx)
// TODO: use the existing backupInstance for this test
bi := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
defer bi.bod.close(t, ctx)
runAndCheckBackup(t, ctx, &bo, mb, false)
bi.runAndCheckBackup(t, ctx, mb, false)
bID := bo.Results.BackupID
bod := bi.bod
obo := bi.obo
bID := obo.Results.BackupID
checkBackupIsInManifests(
t,
ctx,
bod,
obo,
bod.sel,
suite.its.siteID,
path.LibrariesCategory)
deets, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bID,
tenID,
bod.sel.ID(),
bod.sel,
svc,
ws,
bod.kms,
bod.sss)
bi.bod.kms,
bi.bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bID,
ws,
bod.kms,
bod.sss,
bi.bod.kms,
bi.bod.sss,
expectDeets,
false)

View File

@ -26,24 +26,99 @@ import (
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
)
type SharePointBackupIntgSuite struct {
type SharePointIntgSuite struct {
tester.Suite
its intgTesterSetup
// the goal of backupInstances is to run a single backup at the start of
// the suite, and re-use that backup throughout the rest of the suite.
bi *backupInstance
}
func TestSharePointBackupIntgSuite(t *testing.T) {
suite.Run(t, &SharePointBackupIntgSuite{
func TestSharePointIntgSuite(t *testing.T) {
suite.Run(t, &SharePointIntgSuite{
Suite: tester.NewIntegrationSuite(
t,
[][]string{tconfig.M365AcctCredEnvs, storeTD.AWSStorageCredEnvs}),
})
}
func (suite *SharePointBackupIntgSuite) SetupSuite() {
func (suite *SharePointIntgSuite) SetupSuite() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
suite.its = newIntegrationTesterSetup(suite.T())
sel := selectors.NewSharePointBackup([]string{suite.its.siteID})
sel.Include(selTD.SharePointBackupFolderScope(sel))
sel.DiscreteOwner = suite.its.siteID
var (
mb = evmock.NewBus()
opts = control.Defaults()
)
suite.bi = prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
suite.bi.runAndCheckBackup(t, ctx, mb, false)
}
func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
// TeardownSuite releases the suite-wide backup instance created in
// SetupSuite, if one was successfully established.
func (suite *SharePointIntgSuite) TeardownSuite() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	if suite.bi == nil {
		return
	}

	suite.bi.close(t, ctx)
}
// TestBackup_Run_sharePoint validates the suite-wide sharepoint backup run
// in SetupSuite: the backup must be discoverable via its kopia snapshot
// manifest, and the details recorded in the stream store must match the
// backup's own detail entries.
func (suite *SharePointIntgSuite) TestBackup_Run_sharePoint() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
// dependencies and backup operation produced by the shared suite backup.
bod = suite.bi.bod
sel = suite.bi.bod.sel
obo = suite.bi.obo
siteID = suite.its.siteID
whatSet = deeTD.DriveIDFromRepoRef
)
// the completed backup should be tagged in a snapshot manifest for the
// libraries category.
checkBackupIsInManifests(
t,
ctx,
bod,
obo,
sel,
siteID,
path.LibrariesCategory)
// fetch the details stored for the backup...
_, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
obo.Results.BackupID,
bod.acct.ID(),
sel,
path.SharePointService,
whatSet,
bod.kms,
bod.sss)
// ...and confirm the backup's recorded details match them.
deeTD.CheckBackupDetails(
t,
ctx,
obo.Results.BackupID,
whatSet,
bod.kms,
bod.sss,
expectDeets,
false)
}
func (suite *SharePointIntgSuite) TestBackup_Run_incrementalSharePoint() {
sel := selectors.NewSharePointRestore([]string{suite.its.siteID})
ic := func(cs []string) selectors.Selector {
@ -75,6 +150,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
runDriveIncrementalTest(
suite,
suite.bi,
suite.its.siteID,
suite.its.userID,
resource.Sites,
@ -86,36 +162,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
true)
}
func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePoint() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
mb = evmock.NewBus()
sel = selectors.NewSharePointBackup([]string{suite.its.siteID})
opts = control.Defaults()
)
sel.Include(selTD.SharePointBackupFolderScope(sel))
bo, bod := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
defer bod.close(t, ctx)
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&bo,
bod.sel,
suite.its.siteID,
path.LibrariesCategory)
}
func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointExtensions() {
func (suite *SharePointIntgSuite) TestBackup_Run_sharePointExtensions() {
t := suite.T()
ctx, flush := tester.NewContext(t)
@ -134,39 +181,42 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointExtensions() {
sel.Include(selTD.SharePointBackupFolderScope(sel))
bo, bod := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
defer bod.close(t, ctx)
// TODO: use the existing backupInstance for this test
bi := prepNewTestBackupOp(t, ctx, mb, sel.Selector, opts, version.Backup)
defer bi.bod.close(t, ctx)
bi.runAndCheckBackup(t, ctx, mb, false)
bod := bi.bod
obo := bi.obo
bID := obo.Results.BackupID
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(
t,
ctx,
bod.kw,
bod.sw,
&bo,
bod,
obo,
bod.sel,
suite.its.siteID,
path.LibrariesCategory)
bID := bo.Results.BackupID
deets, expectDeets := deeTD.GetDeetsInBackup(
t,
ctx,
bID,
tenID,
bod.sel.ID(),
bod.sel,
svc,
ws,
bod.kms,
bod.sss)
bi.bod.kms,
bi.bod.sss)
deeTD.CheckBackupDetails(
t,
ctx,
bID,
ws,
bod.kms,
bod.sss,
bi.bod.kms,
bi.bod.sss,
expectDeets,
false)

View File

@ -10,6 +10,8 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/idname"
idnMock "github.com/alcionai/corso/src/internal/common/idname/mock"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/internal/streamstore"
@ -293,7 +295,16 @@ func CheckBackupDetails(
// of data.
mustEqualFolders bool,
) {
deets, result := GetDeetsInBackup(t, ctx, backupID, "", "", path.UnknownService, ws, ms, ssr)
deets, result := GetDeetsInBackup(
t,
ctx,
backupID,
"",
idnMock.NewProvider("", ""),
path.UnknownService,
ws,
ms,
ssr)
t.Log("details entries in result")
@ -339,7 +350,8 @@ func GetDeetsInBackup(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
backupID model.StableID,
tid, resourceOwner string,
tid string,
protectedResource idname.Provider,
service path.ServiceType,
ws whatSet,
ms *kopia.ModelStore,
@ -361,7 +373,9 @@ func GetDeetsInBackup(
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
id := NewInDeets(path.Builder{}.Append(tid, service.String(), resourceOwner).String())
pb := path.Builder{}.Append(tid, service.String(), protectedResource.ID())
id := NewInDeets(pb.String())
id.AddAll(deets, ws)
return deets, id

44
src/pkg/selectors/testdata/selectors.go vendored Normal file
View File

@ -0,0 +1,44 @@
package testdata
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
)
// MakeSelector produces a selector for the given service, populated with
// the provided resource owners.  When forRestore is true the restore
// variant of the selector is returned; otherwise the backup variant.
// An unrecognized service fails the test immediately.
func MakeSelector(
	t *testing.T,
	service path.ServiceType,
	resourceOwners []string,
	forRestore bool,
) selectors.Selector {
	switch service {
	case path.ExchangeService:
		if forRestore {
			return selectors.NewExchangeRestore(resourceOwners).Selector
		}

		return selectors.NewExchangeBackup(resourceOwners).Selector
	case path.OneDriveService:
		if forRestore {
			return selectors.NewOneDriveRestore(resourceOwners).Selector
		}

		return selectors.NewOneDriveBackup(resourceOwners).Selector
	case path.SharePointService:
		if forRestore {
			return selectors.NewSharePointRestore(resourceOwners).Selector
		}

		return selectors.NewSharePointBackup(resourceOwners).Selector
	default:
		// include the offending service in the failure output so the
		// caller can tell which value slipped through.
		require.FailNow(t, "unknown path service", service)
		return selectors.Selector{}
	}
}