add indeets test helper, implement in exchange op (#3295)
Adds a helper for building expected details entries and checking them after a backup. Implements the helper in the exchange backup tests in the operations/backup integration suite. A OneDrive implementation will follow.

---

#### Does this PR need a docs update or release note?

- [x] ⛔ No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Issue(s)

* #3240

#### Test Plan

- [x] 💪 Manual
- [x] ⚡ Unit test
- [x] 💚 E2E
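For orientation, here is a minimal sketch of the pattern the new helper enables, assembled from calls that appear in the diff below (`deeTD.NewInDeets`, `AddLocation`, `AddItem`, `MoveLocation`, `RemoveItem`, `CheckBackupDetails`); the scaffolding variables (`acct`, `userID`, `locRef`, `itemID`, and friends) are illustrative placeholders rather than exact test code:

```go
// Build the repo-ref prefix for the resource owner, then trim the category:
// the expected-details set only needs tenant + resource owner.
rrPfx, err := path.ServicePrefix(acct.ID(), userID, service, path.EmailCategory)
require.NoError(t, err, clues.ToCore(err))

expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String())

// Record what the backup should contain.
expectDeets.AddLocation(category.String(), locRef)
expectDeets.AddItem(category.String(), locRef, itemID)

// Mirror any mutations a test case performs against live data.
expectDeets.MoveLocation(category.String(), fromLocRef, toLocRef)
expectDeets.RemoveItem(category.String(), locRef, itemID)

// After the backup runs, compare its persisted details against the expectations.
deeTD.CheckBackupDetails(t, ctx, bo.Results.BackupID, whatSet, ms, ss, expectDeets, true)
```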
parent c0725b9cf9
commit c5b388a721
@@ -112,7 +112,7 @@ func runDisplayM365JSON(
    creds account.M365Config,
    user, itemID string,
 ) error {
-    drive, err := api.GetDriveByID(ctx, srv, user)
+    drive, err := api.GetUsersDrive(ctx, srv, user)
    if err != nil {
        return err
    }

@@ -441,10 +441,7 @@ func restoreCollection(
            continue
        }

-        locationRef := &path.Builder{}
-        if category == path.ContactsCategory {
-            locationRef = locationRef.Append(itemPath.Folders()...)
-        }
+        locationRef := path.Builder{}.Append(itemPath.Folders()...)

        err = deets.Add(
            itemPath,
@@ -336,18 +336,33 @@ func GetItemPermission(
    return perm, nil
 }

-func GetDriveByID(
+func GetUsersDrive(
    ctx context.Context,
    srv graph.Servicer,
-    userID string,
+    user string,
 ) (models.Driveable, error) {
-    //revive:enable:context-as-argument
    d, err := srv.Client().
-        UsersById(userID).
+        UsersById(user).
        Drive().
        Get(ctx, nil)
    if err != nil {
-        return nil, graph.Wrap(ctx, err, "getting drive")
+        return nil, graph.Wrap(ctx, err, "getting user's drive")
+    }
+
+    return d, nil
+}
+
+func GetSitesDefaultDrive(
+    ctx context.Context,
+    srv graph.Servicer,
+    site string,
+) (models.Driveable, error) {
+    d, err := srv.Client().
+        SitesById(site).
+        Drive().
+        Get(ctx, nil)
+    if err != nil {
+        return nil, graph.Wrap(ctx, err, "getting site's drive")
    }

    return d, nil
@@ -3,6 +3,7 @@ package operations
 import (
    "context"
    "fmt"
+    "strings"
    "testing"
    "time"

@@ -22,11 +23,12 @@ import (
    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector"
    "github.com/alcionai/corso/src/internal/connector/exchange"
-    "github.com/alcionai/corso/src/internal/connector/exchange/api"
+    exapi "github.com/alcionai/corso/src/internal/connector/exchange/api"
    exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/connector/mock"
    "github.com/alcionai/corso/src/internal/connector/onedrive"
+    odapi "github.com/alcionai/corso/src/internal/connector/onedrive/api"
    "github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
    "github.com/alcionai/corso/src/internal/connector/support"
    "github.com/alcionai/corso/src/internal/data"
@@ -41,6 +43,7 @@ import (
    "github.com/alcionai/corso/src/pkg/account"
    "github.com/alcionai/corso/src/pkg/backup"
    "github.com/alcionai/corso/src/pkg/backup/details"
+    deeTD "github.com/alcionai/corso/src/pkg/backup/details/testdata"
    "github.com/alcionai/corso/src/pkg/control"
    "github.com/alcionai/corso/src/pkg/control/repository"
    "github.com/alcionai/corso/src/pkg/fault"
@@ -62,11 +65,9 @@ const incrementalsDestContainerPrefix = "incrementals_ci_"
 // prepNewTestBackupOp generates all clients required to run a backup operation,
 // returning both a backup operation created with those clients, as well as
 // the clients themselves.
-//
-//revive:disable:context-as-argument
 func prepNewTestBackupOp(
    t *testing.T,
-    ctx context.Context,
+    ctx context.Context, //revive:disable-line:context-as-argument
    bus events.Eventer,
    sel selectors.Selector,
    featureToggles control.Toggles,
@@ -76,11 +77,11 @@ func prepNewTestBackupOp(
    account.Account,
    *kopia.Wrapper,
    *kopia.ModelStore,
+    streamstore.Streamer,
    *connector.GraphConnector,
    selectors.Selector,
    func(),
 ) {
-    //revive:enable:context-as-argument
    var (
        acct = tester.NewM365Account(t)
        // need to initialize the repository before we can test connecting to it.
@@ -126,18 +127,18 @@ func prepNewTestBackupOp(
    gc, sel := GCWithSelector(t, ctx, acct, connectorResource, sel, nil, closer)
    bo := newTestBackupOp(t, ctx, kw, ms, gc, acct, sel, bus, featureToggles, closer)

-    return bo, acct, kw, ms, gc, sel, closer
+    ss := streamstore.NewStreamer(kw, acct.ID(), sel.PathService())
+
+    return bo, acct, kw, ms, ss, gc, sel, closer
 }

 // newTestBackupOp accepts the clients required to compose a backup operation, plus
 // any other metadata, and uses them to generate a new backup operation. This
 // allows backup chains to utilize the same temp directory and configuration
 // details.
-//
-//revive:disable:context-as-argument
 func newTestBackupOp(
    t *testing.T,
-    ctx context.Context,
+    ctx context.Context, //revive:disable-line:context-as-argument
    kw *kopia.Wrapper,
    ms *kopia.ModelStore,
    gc *connector.GraphConnector,
@@ -147,7 +148,6 @@ func newTestBackupOp(
    featureToggles control.Toggles,
    closer func(),
 ) BackupOperation {
-    //revive:enable:context-as-argument
    var (
        sw = store.NewKopiaStore(ms)
        opts = control.Defaults()
@@ -165,15 +165,13 @@ func newTestBackupOp(
    return bo
 }

-//revive:disable:context-as-argument
 func runAndCheckBackup(
    t *testing.T,
-    ctx context.Context,
+    ctx context.Context, //revive:disable-line:context-as-argument
    bo *BackupOperation,
    mb *evmock.Bus,
    acceptNoData bool,
 ) {
-    //revive:enable:context-as-argument
    err := bo.Run(ctx)
    require.NoError(t, err, clues.ToCore(err))
    require.NotEmpty(t, bo.Results, "the backup had non-zero results")
@@ -206,17 +204,15 @@ func runAndCheckBackup(
        bo.Results.BackupID, "backupID pre-declaration")
 }

-//revive:disable:context-as-argument
 func checkBackupIsInManifests(
    t *testing.T,
-    ctx context.Context,
+    ctx context.Context, //revive:disable-line:context-as-argument
    kw *kopia.Wrapper,
    bo *BackupOperation,
    sel selectors.Selector,
    resourceOwner string,
    categories ...path.CategoryType,
 ) {
-    //revive:enable:context-as-argument
    for _, category := range categories {
        t.Run(category.String(), func(t *testing.T) {
            var (
@@ -343,10 +339,9 @@ func checkMetadataFilesExist(
 // the callback provider can use them, or not, as wanted.
 type dataBuilderFunc func(id, timeStamp, subject, body string) []byte

-//revive:disable:context-as-argument
 func generateContainerOfItems(
    t *testing.T,
-    ctx context.Context,
+    ctx context.Context, //revive:disable-line:context-as-argument
    gc *connector.GraphConnector,
    service path.ServiceType,
    acct account.Account,
@@ -357,7 +352,6 @@ func generateContainerOfItems(
    backupVersion int,
    dbf dataBuilderFunc,
 ) *details.Details {
-    //revive:enable:context-as-argument
    t.Helper()

    items := make([]incrementalItem, 0, howManyItems)
@@ -588,7 +582,6 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
        selector func() *selectors.ExchangeBackup
        category path.CategoryType
        metadataFiles []string
-        runIncremental bool
    }{
        {
            name: "Mail",
@@ -601,7 +594,6 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
            },
            category: path.EmailCategory,
            metadataFiles: exchange.MetadataFileNames(path.EmailCategory),
-            runIncremental: true,
        },
        {
            name: "Contacts",
@@ -612,7 +604,6 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
            },
            category: path.ContactsCategory,
            metadataFiles: exchange.MetadataFileNames(path.ContactsCategory),
-            runIncremental: true,
        },
        {
            name: "Calendar Events",
@@ -632,9 +623,10 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
                mb = evmock.NewBus()
                sel = test.selector().Selector
                ffs = control.Toggles{}
+                whatSet = deeTD.CategoryFromRepoRef
            )

-            bo, acct, kw, ms, gc, sel, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs, version.Backup)
+            bo, acct, kw, ms, ss, gc, sel, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs, version.Backup)
            defer closer()

            userID := sel.ID()
@@ -656,9 +648,17 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
                path.ExchangeService,
                map[path.CategoryType][]string{test.category: test.metadataFiles})

-            if !test.runIncremental {
-                return
-            }
+            _, expectDeets := deeTD.GetDeetsInBackup(
+                t,
+                ctx,
+                bo.Results.BackupID,
+                acct.ID(),
+                userID,
+                path.ExchangeService,
+                whatSet,
+                ms,
+                ss)
+            deeTD.CheckBackupDetails(t, ctx, bo.Results.BackupID, whatSet, ms, ss, expectDeets, false)

            // Basic, happy path incremental test. No changes are dictated or expected.
            // This only tests that an incremental backup is runnable at all, and that it
@@ -680,6 +680,15 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
                userID,
                path.ExchangeService,
                map[path.CategoryType][]string{test.category: test.metadataFiles})
+            deeTD.CheckBackupDetails(
+                t,
+                ctx,
+                incBO.Results.BackupID,
+                whatSet,
+                ms,
+                ss,
+                expectDeets,
+                false)

            // do some additional checks to ensure the incremental dealt with fewer items.
            assert.Greater(t, bo.Results.ItemsWritten, incBO.Results.ItemsWritten, "incremental items written")
@@ -700,7 +709,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {

 // TestBackup_Run ensures that Integration Testing works
 // for the following scopes: Contacts, Events, and Mail
-func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
+func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalExchange() {
    ctx, flush := tester.NewContext()
    defer flush()

@@ -712,6 +721,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
        ffs = control.Toggles{}
        mb = evmock.NewBus()
        now = dttm.Now()
+        service = path.ExchangeService
        categories = map[path.CategoryType][]string{
            path.EmailCategory: exchange.MetadataFileNames(path.EmailCategory),
            path.ContactsCategory: exchange.MetadataFileNames(path.ContactsCategory),
@@ -728,11 +738,12 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
        // at this point is harmless.
        containers = []string{container1, container2, container3, containerRename}
        sel = selectors.NewExchangeBackup([]string{suite.user})
+        whatSet = deeTD.CategoryFromRepoRef
    )

    gc, sels := GCWithSelector(t, ctx, acct, connector.Users, sel.Selector, nil, nil)
-    sel, err := sels.ToExchangeBackup()
-    require.NoError(t, err, clues.ToCore(err))
+    sel.DiscreteOwner = sels.ID()
+    sel.DiscreteOwnerName = sels.Name()

    uidn := inMock.NewProvider(sels.ID(), sels.Name())

@@ -743,7 +754,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
    m365, err := acct.M365Config()
    require.NoError(t, err, clues.ToCore(err))

-    ac, err := api.NewClient(m365)
+    ac, err := exapi.NewClient(m365)
    require.NoError(t, err, clues.ToCore(err))

    // generate 3 new folders with two items each.
@@ -754,7 +765,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
    // container into another generates a delta for both addition and deletion.
    type contDeets struct {
        containerID string
-        deets *details.Details
+        locRef string
+        itemRefs []string // cached for populating expected deets, otherwise not used
    }

    mailDBF := func(id, timeStamp, subject, body string) []byte {
@@ -812,11 +824,14 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
    // populate initial test data
    for category, gen := range dataset {
        for destName := range gen.dests {
+            // TODO: the details.Builder returned by restore can contain entries with
+            // incorrect information. non-representative repo-refs and the like. Until
+            // that gets fixed, we can't consume that info for testing.
            deets := generateContainerOfItems(
                t,
                ctx,
                gc,
-                path.ExchangeService,
+                service,
                acct,
                category,
                selectors.NewExchangeRestore([]string{uidn.ID()}).Selector,
@@ -825,41 +840,103 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
                version.Backup,
                gen.dbf)

-            dataset[category].dests[destName] = contDeets{"", deets}
+            itemRefs := []string{}
+
+            for _, ent := range deets.Entries {
+                if ent.Exchange == nil || ent.Folder != nil {
+                    continue
+                }
+
+                if len(ent.ItemRef) > 0 {
+                    itemRefs = append(itemRefs, ent.ItemRef)
+                }
+            }
+
+            // save the item ids for building expectedDeets later on
+            cd := dataset[category].dests[destName]
+            cd.itemRefs = itemRefs
+            dataset[category].dests[destName] = cd
+        }
+    }
+
+    bo, acct, kw, ms, ss, gc, sels, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, ffs, version.Backup)
+    defer closer()
+
+    // run the initial backup
+    runAndCheckBackup(t, ctx, &bo, mb, false)
+
+    rrPfx, err := path.ServicePrefix(acct.ID(), uidn.ID(), service, path.EmailCategory)
+    require.NoError(t, err, clues.ToCore(err))
+
+    // strip the category from the prefix; we primarily want the tenant and resource owner.
+    expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String())
+    bupDeets, _ := deeTD.GetDeetsInBackup(t, ctx, bo.Results.BackupID, acct.ID(), uidn.ID(), service, whatSet, ms, ss)
+
+    // update the datasets with their location refs
+    for category, gen := range dataset {
+        for destName, cd := range gen.dests {
+            var longestLR string
+
+            for _, ent := range bupDeets.Entries {
+                // generated destinations should always contain items
+                if ent.Folder != nil {
+                    continue
+                }
+
+                p, err := path.FromDataLayerPath(ent.RepoRef, false)
+                require.NoError(t, err, clues.ToCore(err))
+
+                // category must match, and the owning folder must be this destination
+                if p.Category() != category || strings.HasSuffix(ent.LocationRef, destName) {
+                    continue
+                }
+
+                // emails, due to folder nesting and our design for populating data via restore,
+                // will duplicate the dest folder as both the restore destination, and the "old parent
+                // folder". we'll get both a prefix/destName and a prefix/destName/destName folder.
+                // since we want future comparison to only use the leaf dir, we select for the longest match.
+                if len(ent.LocationRef) > len(longestLR) {
+                    longestLR = ent.LocationRef
+                }
+            }
+
+            require.NotEmptyf(t, longestLR, "must find an expected details entry matching the generated folder: %s", destName)
+
+            cd.locRef = longestLR
+
+            dataset[category].dests[destName] = cd
+            expectDeets.AddLocation(category.String(), cd.locRef)
+
+            for _, i := range dataset[category].dests[destName].itemRefs {
+                expectDeets.AddItem(category.String(), cd.locRef, i)
+            }
        }
    }

    // verify test data was populated, and track it for comparisons
+    // TODO: this can be swapped out for InDeets checks if we add itemRefs to folder ents.
    for category, gen := range dataset {
        qp := graph.QueryParams{
            Category: category,
            ResourceOwner: uidn,
            Credentials: m365,
        }

        cr, err := exchange.PopulateExchangeContainerResolver(ctx, qp, fault.New(true))
        require.NoError(t, err, "populating container resolver", category, clues.ToCore(err))

        for destName, dest := range gen.dests {
-            p, err := path.FromDataLayerPath(dest.deets.Entries[0].RepoRef, true)
-            require.NoError(t, err, clues.ToCore(err))
+            id, ok := cr.LocationInCache(dest.locRef)
+            require.True(t, ok, "dir %s found in %s cache", dest.locRef, category)

-            id, ok := cr.LocationInCache(p.Folder(false))
-            require.True(t, ok, "dir %s found in %s cache", p.Folder(false), category)
-
-            d := dataset[category].dests[destName]
-            d.containerID = id
-            dataset[category].dests[destName] = d
+            dest.containerID = id
+            dataset[category].dests[destName] = dest
        }
    }

-    bo, _, kw, ms, gc, sels, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, ffs, version.Backup)
-    defer closer()
-    sel, err = sels.ToExchangeBackup()
-    require.NoError(t, err, clues.ToCore(err))
-
-    // run the initial backup
-    runAndCheckBackup(t, ctx, &bo, mb, false)
+    // precheck to ensure the expectedDeets are correct.
+    // if we fail here, the expectedDeets were populated incorrectly.
+    deeTD.CheckBackupDetails(t, ctx, bo.Results.BackupID, whatSet, ms, ss, expectDeets, true)

    // Although established as a table, these tests are not isolated from each other.
    // Assume that every test's side effects cascade to all following test cases.
@@ -881,20 +958,25 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
        {
            name: "move an email folder to a subfolder",
            updateUserData: func(t *testing.T) {
+                cat := path.EmailCategory
+
                // contacts and events cannot be subfoldered; this is an email-only change
-                toContainer := dataset[path.EmailCategory].dests[container1].containerID
-                fromContainer := dataset[path.EmailCategory].dests[container2].containerID
+                from := dataset[cat].dests[container2]
+                to := dataset[cat].dests[container1]

                body := users.NewItemMailFoldersItemMovePostRequestBody()
-                body.SetDestinationId(&toContainer)
+                body.SetDestinationId(ptr.To(to.containerID))

                _, err := gc.Service.
                    Client().
                    UsersById(uidn.ID()).
-                    MailFoldersById(fromContainer).
+                    MailFoldersById(from.containerID).
                    Move().
                    Post(ctx, body, nil)
                require.NoError(t, err, clues.ToCore(err))
+
+                newLoc := expectDeets.MoveLocation(cat.String(), from.locRef, to.locRef)
+                from.locRef = newLoc
            },
            itemsRead: 0, // zero because we don't count container reads
            itemsWritten: 2,
@@ -916,6 +998,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
                        err := ac.Events().DeleteContainer(ctx, uidn.ID(), containerID)
                        require.NoError(t, err, "deleting a calendar", clues.ToCore(err))
                    }
+
+                    expectDeets.RemoveLocation(category.String(), d.dests[container2].locRef)
                }
            },
            itemsRead: 0,
@@ -929,7 +1013,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
                    t,
                    ctx,
                    gc,
-                    path.ExchangeService,
+                    service,
                    acct,
                    category,
                    selectors.NewExchangeRestore([]string{uidn.ID()}).Selector,
@@ -944,16 +1028,28 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
                        Credentials: m365,
                    }

+                    expectedLocRef := container3
+                    if category == path.EmailCategory {
+                        expectedLocRef = path.Builder{}.Append(container3, container3).String()
+                    }
+
                    cr, err := exchange.PopulateExchangeContainerResolver(ctx, qp, fault.New(true))
                    require.NoError(t, err, "populating container resolver", category, clues.ToCore(err))

-                    p, err := path.FromDataLayerPath(deets.Entries[0].RepoRef, true)
-                    require.NoError(t, err, clues.ToCore(err))
+                    id, ok := cr.LocationInCache(expectedLocRef)
+                    require.Truef(t, ok, "dir %s found in %s cache", expectedLocRef, category)

-                    id, ok := cr.LocationInCache(p.Folder(false))
-                    require.Truef(t, ok, "dir %s found in %s cache", p.Folder(false), category)
-
-                    dataset[category].dests[container3] = contDeets{id, deets}
+                    dataset[category].dests[container3] = contDeets{
+                        containerID: id,
+                        locRef: expectedLocRef,
+                        itemRefs: nil, // not needed at this point
+                    }
+
+                    for _, ent := range deets.Entries {
+                        if ent.Folder == nil {
+                            expectDeets.AddItem(category.String(), expectedLocRef, ent.ItemRef)
+                        }
+                    }
                }
            },
            itemsRead: 4,
@@ -963,17 +1059,24 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
            name: "rename a folder",
            updateUserData: func(t *testing.T) {
                for category, d := range dataset {
-                    containerID := d.dests[container3].containerID
                    cli := gc.Service.Client().UsersById(uidn.ID())
+                    containerID := d.dests[container3].containerID
+                    newLoc := containerRename

-                    // copy the container info, since both names should
-                    // reference the same container by id. Though the
-                    // details refs won't line up, so those get deleted.
-                    d.dests[containerRename] = contDeets{
-                        containerID: d.dests[container3].containerID,
-                        deets: nil,
+                    if category == path.EmailCategory {
+                        newLoc = path.Builder{}.Append(container3, containerRename).String()
                    }
+
+                    d.dests[containerRename] = contDeets{
+                        containerID: containerID,
+                        locRef: newLoc,
+                    }
+
+                    expectDeets.RenameLocation(
+                        category.String(),
+                        d.dests[container3].containerID,
+                        newLoc)

                    switch category {
                    case path.EmailCategory:
                        cmf := cli.MailFoldersById(containerID)
@@ -1023,24 +1126,39 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
                        body, err := support.CreateMessageFromBytes(itemData)
                        require.NoError(t, err, "transforming mail bytes to messageable", clues.ToCore(err))

-                        _, err = cli.MailFoldersById(containerID).Messages().Post(ctx, body, nil)
+                        itm, err := cli.MailFoldersById(containerID).Messages().Post(ctx, body, nil)
                        require.NoError(t, err, "posting email item", clues.ToCore(err))

+                        expectDeets.AddItem(
+                            category.String(),
+                            d.dests[category.String()].locRef,
+                            ptr.Val(itm.GetId()))
+
                    case path.ContactsCategory:
                        _, itemData := generateItemData(t, category, uidn.ID(), contactDBF)
                        body, err := support.CreateContactFromBytes(itemData)
                        require.NoError(t, err, "transforming contact bytes to contactable", clues.ToCore(err))

-                        _, err = cli.ContactFoldersById(containerID).Contacts().Post(ctx, body, nil)
+                        itm, err := cli.ContactFoldersById(containerID).Contacts().Post(ctx, body, nil)
                        require.NoError(t, err, "posting contact item", clues.ToCore(err))

+                        expectDeets.AddItem(
+                            category.String(),
+                            d.dests[category.String()].locRef,
+                            ptr.Val(itm.GetId()))
+
                    case path.EventsCategory:
                        _, itemData := generateItemData(t, category, uidn.ID(), eventDBF)
                        body, err := support.CreateEventFromBytes(itemData)
                        require.NoError(t, err, "transforming event bytes to eventable", clues.ToCore(err))

-                        _, err = cli.CalendarsById(containerID).Events().Post(ctx, body, nil)
+                        itm, err := cli.CalendarsById(containerID).Events().Post(ctx, body, nil)
                        require.NoError(t, err, "posting events item", clues.ToCore(err))
+
+                        expectDeets.AddItem(
+                            category.String(),
+                            d.dests[category.String()].locRef,
+                            ptr.Val(itm.GetId()))
                    }
                }
            },
@@ -1063,6 +1181,11 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
                        err = cli.MessagesById(ids[0]).Delete(ctx, nil)
                        require.NoError(t, err, "deleting email item", clues.ToCore(err))

+                        expectDeets.RemoveItem(
+                            category.String(),
+                            d.dests[category.String()].locRef,
+                            ids[0])
+
                    case path.ContactsCategory:
                        ids, _, _, err := ac.Contacts().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), containerID, "", false)
                        require.NoError(t, err, "getting contact ids", clues.ToCore(err))
@@ -1071,6 +1194,11 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
                        err = cli.ContactsById(ids[0]).Delete(ctx, nil)
                        require.NoError(t, err, "deleting contact item", clues.ToCore(err))

+                        expectDeets.RemoveItem(
+                            category.String(),
+                            d.dests[category.String()].locRef,
+                            ids[0])
+
                    case path.EventsCategory:
                        ids, _, _, err := ac.Events().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), containerID, "", false)
                        require.NoError(t, err, "getting event ids", clues.ToCore(err))
@@ -1078,6 +1206,11 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {

                        err = cli.CalendarsById(ids[0]).Delete(ctx, nil)
                        require.NoError(t, err, "deleting calendar", clues.ToCore(err))
+
+                        expectDeets.RemoveItem(
+                            category.String(),
+                            d.dests[category.String()].locRef,
+                            ids[0])
                    }
                }
            },
@@ -1090,24 +1223,20 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
            var (
                t = suite.T()
                incMB = evmock.NewBus()
-                incBO = newTestBackupOp(t, ctx, kw, ms, gc, acct, sel.Selector, incMB, ffs, closer)
+                incBO = newTestBackupOp(t, ctx, kw, ms, gc, acct, sels, incMB, ffs, closer)
+                atid = m365.AzureTenantID
            )

            test.updateUserData(t)

            err := incBO.Run(ctx)
            require.NoError(t, err, clues.ToCore(err))
-            checkBackupIsInManifests(t, ctx, kw, &incBO, sel.Selector, uidn.ID(), maps.Keys(categories)...)
-            checkMetadataFilesExist(
-                t,
-                ctx,
-                incBO.Results.BackupID,
-                kw,
-                ms,
-                m365.AzureTenantID,
-                uidn.ID(),
-                path.ExchangeService,
-                categories)
+
+            bupID := incBO.Results.BackupID
+
+            checkBackupIsInManifests(t, ctx, kw, &incBO, sels, uidn.ID(), maps.Keys(categories)...)
+            checkMetadataFilesExist(t, ctx, bupID, kw, ms, atid, uidn.ID(), service, categories)
+            deeTD.CheckBackupDetails(t, ctx, bupID, whatSet, ms, ss, expectDeets, true)

            // do some additional checks to ensure the incremental dealt with fewer items.
            // +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
@@ -1119,7 +1248,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
            assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
            assert.Equal(t,
                incMB.CalledWith[events.BackupStart][0][events.BackupID],
-                incBO.Results.BackupID, "incremental backupID pre-declaration")
+                bupID, "incremental backupID pre-declaration")
        })
    }
 }
@@ -1134,20 +1263,28 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDrive() {

    var (
        t = suite.T()
+        tenID = tester.M365TenantID(t)
        mb = evmock.NewBus()
-        m365UserID = tester.SecondaryM365UserID(t)
-        osel = selectors.NewOneDriveBackup([]string{m365UserID})
+        userID = tester.SecondaryM365UserID(t)
+        osel = selectors.NewOneDriveBackup([]string{userID})
+        ws = deeTD.DriveIDFromRepoRef
+        svc = path.OneDriveService
    )

    osel.Include(selTD.OneDriveBackupFolderScope(osel))

-    bo, _, _, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, osel.Selector, control.Toggles{}, version.Backup)
+    bo, _, _, ms, ss, _, sel, closer := prepNewTestBackupOp(t, ctx, mb, osel.Selector, control.Toggles{}, version.Backup)
    defer closer()

    runAndCheckBackup(t, ctx, &bo, mb, false)
+
+    bID := bo.Results.BackupID
+
+    _, expectDeets := deeTD.GetDeetsInBackup(t, ctx, bID, tenID, sel.ID(), svc, ws, ms, ss)
+    deeTD.CheckBackupDetails(t, ctx, bID, ws, ms, ss, expectDeets, false)
 }

-func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() {
+func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalOneDrive() {
    sel := selectors.NewOneDriveRestore([]string{suite.user})

    ic := func(cs []string) selectors.Selector {
@@ -1158,9 +1295,9 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() {
    gtdi := func(
        t *testing.T,
        ctx context.Context,
-        svc graph.Servicer,
+        gs graph.Servicer,
    ) string {
-        d, err := svc.Client().UsersById(suite.user).Drive().Get(ctx, nil)
+        d, err := odapi.GetUsersDrive(ctx, gs, suite.user)
        if err != nil {
            err = graph.Wrap(ctx, err, "retrieving default user drive").
                With("user", suite.user)
@@ -1186,7 +1323,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveIncrementals() {
        false)
 }

-func (suite *BackupOpIntegrationSuite) TestBackup_Run_sharePointIncrementals() {
+func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalSharePoint() {
    sel := selectors.NewSharePointRestore([]string{suite.site})

    ic := func(cs []string) selectors.Selector {
@@ -1197,9 +1334,9 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_sharePointIncrementals() {
    gtdi := func(
        t *testing.T,
        ctx context.Context,
-        svc graph.Servicer,
+        gs graph.Servicer,
    ) string {
-        d, err := svc.Client().SitesById(suite.site).Drive().Get(ctx, nil)
+        d, err := odapi.GetSitesDefaultDrive(ctx, gs, suite.site)
        if err != nil {
            err = graph.Wrap(ctx, err, "retrieving default site drive").
                With("site", suite.site)
@@ -1243,6 +1380,7 @@ func runDriveIncrementalTest(
        acct = tester.NewM365Account(t)
        ffs = control.Toggles{}
        mb = evmock.NewBus()
+        ws = deeTD.DriveIDFromRepoRef

        // `now` has to be formatted with SimpleDateTimeTesting as
        // some drives cannot have `:` in file/folder names
@@ -1254,6 +1392,7 @@ func runDriveIncrementalTest(
        container1 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 1, now)
        container2 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 2, now)
        container3 = fmt.Sprintf("%s%d_%s", incrementalsDestContainerPrefix, 3, now)
+        containerRename = "renamed_folder"

        genDests = []string{container1, container2}

@@ -1269,13 +1408,26 @@ func runDriveIncrementalTest(

    gc, sel := GCWithSelector(t, ctx, acct, resource, sel, nil, nil)

+    roidn := inMock.NewProvider(sel.ID(), sel.Name())
+
    var (
+        atid = creds.AzureTenantID
        driveID = getTestDriveID(t, ctx, gc.Service)
        fileDBF = func(id, timeStamp, subject, body string) []byte {
            return []byte(id + subject)
        }
+        makeLocRef = func(flds ...string) string {
+            elems := append([]string{driveID, "root:"}, flds...)
+            return path.Builder{}.Append(elems...).String()
+        }
    )

+    rrPfx, err := path.ServicePrefix(atid, roidn.ID(), service, category)
+    require.NoError(t, err, clues.ToCore(err))
+
+    // strip the category from the prefix; we primarily want the tenant and resource owner.
+    expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String())
+
    // Populate initial test data.
    // Generate 2 new folders with two items each. Only the first two
    // folders will be part of the initial backup and
@@ -1283,7 +1435,7 @@ func runDriveIncrementalTest(
    // through the changes. This should be enough to cover most delta
    // actions.
    for _, destName := range genDests {
-        generateContainerOfItems(
+        deets := generateContainerOfItems(
            t,
            ctx,
            gc,
@@ -1291,11 +1443,19 @@ func runDriveIncrementalTest(
            acct,
            category,
            sel,
-            creds.AzureTenantID, owner, driveID, destName,
+            atid, roidn.ID(), driveID, destName,
            2,
            // Use an old backup version so we don't need metadata files.
            0,
            fileDBF)
+
+        for _, ent := range deets.Entries {
+            if ent.Folder != nil {
+                continue
+            }
+
+            expectDeets.AddItem(driveID, makeLocRef(destName), ent.ItemRef)
+        }
    }

    containerIDs := map[string]string{}
@@ -1313,15 +1473,20 @@ func runDriveIncrementalTest(
        containerIDs[destName] = ptr.Val(resp.GetId())
    }

-    bo, _, kw, ms, gc, _, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs, version.Backup)
+    bo, _, kw, ms, ss, gc, _, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs, version.Backup)
    defer closer()

    // run the initial backup
    runAndCheckBackup(t, ctx, &bo, mb, false)

+    // precheck to ensure the expectedDeets are correct.
+    // if we fail here, the expectedDeets were populated incorrectly.
+    deeTD.CheckBackupDetails(t, ctx, bo.Results.BackupID, ws, ms, ss, expectDeets, true)
+
    var (
        newFile models.DriveItemable
        newFileName = "new_file.txt"
+        newFileID string

        permissionIDMappings = map[string]string{}
        writePerm = metadata.Permission{
@@ -1363,6 +1528,10 @@ func runDriveIncrementalTest(
                    targetContainer,
                    driveItem)
                require.NoErrorf(t, err, "creating new file %v", clues.ToCore(err))
+
+                newFileID = ptr.Val(newFile.GetId())
+
+                expectDeets.AddItem(driveID, makeLocRef(container1), newFileID)
            },
            itemsRead: 1, // .data file for newitem
            itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
@@ -1382,8 +1551,10 @@ func runDriveIncrementalTest(
                    *newFile.GetId(),
                    []metadata.Permission{writePerm},
                    []metadata.Permission{},
-                    permissionIDMappings)
+                    permissionIDMappings,
+                )
                require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
+                // no expectedDeets: metadata isn't tracked
            },
            itemsRead: 1, // .data file for newitem
            itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
@@ -1403,8 +1574,10 @@ func runDriveIncrementalTest(
                    *newFile.GetId(),
                    []metadata.Permission{},
                    []metadata.Permission{writePerm},
-                    permissionIDMappings)
+                    permissionIDMappings,
+                )
                require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
+                // no expectedDeets: metadata isn't tracked
            },
            itemsRead: 1, // .data file for newitem
            itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
@@ -1425,8 +1598,10 @@ func runDriveIncrementalTest(
                    targetContainer,
                    []metadata.Permission{writePerm},
                    []metadata.Permission{},
-                    permissionIDMappings)
+                    permissionIDMappings,
+                )
                require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
+                // no expectedDeets: metadata isn't tracked
            },
            itemsRead: 0,
            itemsWritten: 1, // .dirmeta for collection
@@ -1447,8 +1622,10 @@ func runDriveIncrementalTest(
                    targetContainer,
                    []metadata.Permission{},
                    []metadata.Permission{writePerm},
-                    permissionIDMappings)
+                    permissionIDMappings,
+                )
                require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
+                // no expectedDeets: metadata isn't tracked
            },
            itemsRead: 0,
            itemsWritten: 1, // .dirmeta for collection
@@ -1463,6 +1640,7 @@ func runDriveIncrementalTest(
                    Content().
                    Put(ctx, []byte("new content"), nil)
                require.NoErrorf(t, err, "updating file contents: %v", clues.ToCore(err))
+                // no expectedDeets: neither file id nor location changed
            },
            itemsRead: 1, // .data file for newitem
            itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
@@ -1488,11 +1666,12 @@ func runDriveIncrementalTest(
            },
            itemsRead: 1, // .data file for newitem
            itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
+            // no expectedDeets: neither file id nor location changed
        },
        {
            name: "move a file between folders",
            updateFiles: func(t *testing.T) {
-                dest := containerIDs[container1]
+                dest := containerIDs[container2]

                driveItem := models.NewDriveItem()
                driveItem.SetName(&newFileName)
@@ -1506,6 +1685,12 @@ func runDriveIncrementalTest(
                    ItemsById(ptr.Val(newFile.GetId())).
                    Patch(ctx, driveItem, nil)
                require.NoErrorf(t, err, "moving file between folders %v", clues.ToCore(err))
+
+                expectDeets.MoveItem(
+                    driveID,
+                    makeLocRef(container1),
+                    makeLocRef(container2),
+                    ptr.Val(newFile.GetId()))
            },
            itemsRead: 1, // .data file for newitem
            itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
@@ -1521,6 +1706,8 @@ func runDriveIncrementalTest(
                    ItemsById(ptr.Val(newFile.GetId())).
                    Delete(ctx, nil)
                require.NoErrorf(t, err, "deleting file %v", clues.ToCore(err))
+
+                expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId()))
            },
            itemsRead: 0,
            itemsWritten: 0,
@@ -1528,21 +1715,26 @@ func runDriveIncrementalTest(
        {
            name: "move a folder to a subfolder",
            updateFiles: func(t *testing.T) {
-                dest := containerIDs[container1]
-                source := containerIDs[container2]
+                parent := containerIDs[container1]
+                child := containerIDs[container2]

                driveItem := models.NewDriveItem()
                driveItem.SetName(&container2)
                parentRef := models.NewItemReference()
-                parentRef.SetId(&dest)
+                parentRef.SetId(&parent)
                driveItem.SetParentReference(parentRef)

                _, err := gc.Service.
                    Client().
                    DrivesById(driveID).
-                    ItemsById(source).
+                    ItemsById(child).
                    Patch(ctx, driveItem, nil)
                require.NoError(t, err, "moving folder", clues.ToCore(err))
+
+                expectDeets.MoveLocation(
+                    driveID,
+                    makeLocRef(container2),
+                    makeLocRef(container1))
            },
            itemsRead: 0,
            itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
@ -1554,8 +1746,7 @@ func runDriveIncrementalTest(
|
|||||||
child := containerIDs[container2]
|
child := containerIDs[container2]
|
||||||
|
|
||||||
driveItem := models.NewDriveItem()
|
driveItem := models.NewDriveItem()
|
||||||
name := "renamed_folder"
|
driveItem.SetName(&containerRename)
|
||||||
driveItem.SetName(&name)
|
|
||||||
parentRef := models.NewItemReference()
|
parentRef := models.NewItemReference()
|
||||||
parentRef.SetId(&parent)
|
parentRef.SetId(&parent)
|
||||||
driveItem.SetParentReference(parentRef)
|
driveItem.SetParentReference(parentRef)
|
||||||
@ -1566,6 +1757,13 @@ func runDriveIncrementalTest(
|
|||||||
ItemsById(child).
|
ItemsById(child).
|
||||||
Patch(ctx, driveItem, nil)
|
Patch(ctx, driveItem, nil)
|
||||||
require.NoError(t, err, "renaming folder", clues.ToCore(err))
|
require.NoError(t, err, "renaming folder", clues.ToCore(err))
|
||||||
|
|
||||||
|
containerIDs[containerRename] = containerIDs[container2]
|
||||||
|
|
||||||
|
expectDeets.RenameLocation(
|
||||||
|
driveID,
|
||||||
|
makeLocRef(container1, container2),
|
||||||
|
makeLocRef(container1, containerRename))
|
||||||
},
|
},
|
||||||
itemsRead: 0,
|
itemsRead: 0,
|
||||||
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
|
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
|
||||||
@ -1573,7 +1771,7 @@ func runDriveIncrementalTest(
|
|||||||
{
|
{
|
||||||
name: "delete a folder",
|
name: "delete a folder",
|
||||||
updateFiles: func(t *testing.T) {
|
updateFiles: func(t *testing.T) {
|
||||||
container := containerIDs[container2]
|
container := containerIDs[containerRename]
|
||||||
// deletes require unique http clients
|
// deletes require unique http clients
|
||||||
// https://github.com/alcionai/corso/issues/2707
|
// https://github.com/alcionai/corso/issues/2707
|
||||||
err = newDeleteServicer(t).
|
err = newDeleteServicer(t).
|
||||||
@ -1582,6 +1780,8 @@ func runDriveIncrementalTest(
|
|||||||
ItemsById(container).
|
ItemsById(container).
|
||||||
Delete(ctx, nil)
|
Delete(ctx, nil)
|
||||||
require.NoError(t, err, "deleting folder", clues.ToCore(err))
|
require.NoError(t, err, "deleting folder", clues.ToCore(err))
|
||||||
|
|
||||||
|
expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename))
|
||||||
},
|
},
|
||||||
itemsRead: 0,
|
itemsRead: 0,
|
||||||
itemsWritten: 0,
|
itemsWritten: 0,
|
||||||
@ -1597,7 +1797,7 @@ func runDriveIncrementalTest(
|
|||||||
acct,
|
acct,
|
||||||
category,
|
category,
|
||||||
sel,
|
sel,
|
||||||
creds.AzureTenantID, owner, driveID, container3,
|
atid, roidn.ID(), driveID, container3,
|
||||||
2,
|
2,
|
||||||
0,
|
0,
|
||||||
fileDBF)
|
fileDBF)
|
||||||
@ -1612,6 +1812,8 @@ func runDriveIncrementalTest(
|
|||||||
require.NoError(t, err, "getting drive folder ID", "folder name", container3, clues.ToCore(err))
|
require.NoError(t, err, "getting drive folder ID", "folder name", container3, clues.ToCore(err))
|
||||||
|
|
||||||
containerIDs[container3] = ptr.Val(resp.GetId())
|
containerIDs[container3] = ptr.Val(resp.GetId())
|
||||||
|
|
||||||
|
expectDeets.AddLocation(driveID, container3)
|
||||||
},
|
},
|
||||||
itemsRead: 2, // 2 .data for 2 files
|
itemsRead: 2, // 2 .data for 2 files
|
||||||
itemsWritten: 6, // read items + 2 directory meta
|
itemsWritten: 6, // read items + 2 directory meta
|
||||||
@ -1639,17 +1841,11 @@ func runDriveIncrementalTest(
|
|||||||
err = incBO.Run(ctx)
|
err = incBO.Run(ctx)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
checkBackupIsInManifests(t, ctx, kw, &incBO, sel, sel.ID(), maps.Keys(categories)...)
|
bupID := incBO.Results.BackupID
|
||||||
checkMetadataFilesExist(
|
|
||||||
t,
|
checkBackupIsInManifests(t, ctx, kw, &incBO, sel, roidn.ID(), maps.Keys(categories)...)
|
||||||
ctx,
|
checkMetadataFilesExist(t, ctx, bupID, kw, ms, atid, roidn.ID(), service, categories)
|
||||||
incBO.Results.BackupID,
|
deeTD.CheckBackupDetails(t, ctx, bupID, ws, ms, ss, expectDeets, true)
|
||||||
kw,
|
|
||||||
ms,
|
|
||||||
creds.AzureTenantID,
|
|
||||||
sel.ID(),
|
|
||||||
service,
|
|
||||||
categories)
|
|
||||||
|
|
||||||
// do some additional checks to ensure the incremental dealt with fewer items.
|
// do some additional checks to ensure the incremental dealt with fewer items.
|
||||||
// +2 on read/writes to account for metadata: 1 delta and 1 path.
|
// +2 on read/writes to account for metadata: 1 delta and 1 path.
|
||||||
@ -1661,7 +1857,7 @@ func runDriveIncrementalTest(
|
|||||||
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
|
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
incMB.CalledWith[events.BackupStart][0][events.BackupID],
|
incMB.CalledWith[events.BackupStart][0][events.BackupID],
|
||||||
incBO.Results.BackupID, "incremental backupID pre-declaration")
|
bupID, "incremental backupID pre-declaration")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1699,7 +1895,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() {
|
|||||||
oldsel := selectors.NewOneDriveBackup([]string{uname})
|
oldsel := selectors.NewOneDriveBackup([]string{uname})
|
||||||
oldsel.Include(selTD.OneDriveBackupFolderScope(oldsel))
|
oldsel.Include(selTD.OneDriveBackupFolderScope(oldsel))
|
||||||
|
|
||||||
bo, _, kw, ms, gc, sel, closer := prepNewTestBackupOp(t, ctx, mb, oldsel.Selector, ffs, 0)
|
bo, _, kw, ms, _, gc, sel, closer := prepNewTestBackupOp(t, ctx, mb, oldsel.Selector, ffs, 0)
|
||||||
defer closer()
|
defer closer()
|
||||||
|
|
||||||
// ensure the initial owner uses name in both cases
|
// ensure the initial owner uses name in both cases
|
||||||
@ -1800,7 +1996,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_sharePoint() {
|
|||||||
|
|
||||||
sel.Include(selTD.SharePointBackupFolderScope(sel))
|
sel.Include(selTD.SharePointBackupFolderScope(sel))
|
||||||
|
|
||||||
bo, _, kw, _, _, sels, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}, version.Backup)
|
bo, _, kw, _, _, _, sels, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}, version.Backup)
|
||||||
defer closer()
|
defer closer()
|
||||||
|
|
||||||
runAndCheckBackup(t, ctx, &bo, mb, false)
|
runAndCheckBackup(t, ctx, &bo, mb, false)
|
||||||
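Every table entry above follows the same two-step pattern: apply a mutation through the Graph client, then record the identical change on `expectDeets`, so the predicted details stay in sync with what the incremental backup should observe. A minimal, self-contained sketch of that prediction flow, using placeholder IDs rather than the refs the real test computes:

```go
package main

import (
	"fmt"

	deeTD "github.com/alcionai/corso/src/pkg/backup/details/testdata"
)

func main() {
	// expected details for one drive, keyed by driveID
	expect := deeTD.NewInDeets("tenantID/onedrive/resourceOwnerID")
	expect.AddItem("driveID", "root/container1", "fileID")

	// mirror moving the file from container1 into container2
	expect.MoveItem("driveID", "root/container1", "root/container2", "fileID")

	// mirror relocating the container2 folder underneath container1
	newLoc := expect.MoveLocation("driveID", "root/container2", "root/container1")
	fmt.Println(newLoc) // e.g. "root/container1/container2"
}
```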
src/pkg/backup/details/testdata/in_deets.go (new file, vendored)
@@ -0,0 +1,368 @@
package testdata

import (
	"context"
	"strings"
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/maps"

	"github.com/alcionai/corso/src/internal/kopia"
	"github.com/alcionai/corso/src/internal/model"
	"github.com/alcionai/corso/src/internal/streamstore"
	"github.com/alcionai/corso/src/pkg/backup"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

// ---------------------------------------------------------------------------
// location set handling
// ---------------------------------------------------------------------------

var exists = struct{}{}

type locSet struct {
	// map [locationRef] map [itemRef] {}
	// refs may be either the canonical ent refs, or something else,
	// so long as they are consistent for the test in question
	Locations map[string]map[string]struct{}
	Deleted   map[string]map[string]struct{}
}

func newLocSet() *locSet {
	return &locSet{
		Locations: map[string]map[string]struct{}{},
		Deleted:   map[string]map[string]struct{}{},
	}
}
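The set tracks tombstones alongside live entries: the removal methods below record entries in `Deleted`, and re-adding an item or location clears its tombstone. A small in-package sketch of that round trip (the unit tests in in_deets_test.go assert the same behavior):

```go
// in package testdata
ls := newLocSet()

ls.AddItem("inbox", "msg-1")    // Locations["inbox"] = {"msg-1"}
ls.RemoveItem("inbox", "msg-1") // "msg-1" moves into Deleted["inbox"]
ls.AddItem("inbox", "msg-1")    // re-adding clears the tombstone;
                                // Deleted["inbox"] no longer lists "msg-1"
```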
func (ls *locSet) AddItem(locationRef, itemRef string) {
	ls.AddLocation(locationRef)

	ls.Locations[locationRef][itemRef] = exists
	delete(ls.Deleted[locationRef], itemRef)
}

func (ls *locSet) RemoveItem(locationRef, itemRef string) {
	delete(ls.Locations[locationRef], itemRef)

	if _, ok := ls.Deleted[locationRef]; !ok {
		ls.Deleted[locationRef] = map[string]struct{}{}
	}

	ls.Deleted[locationRef][itemRef] = exists
}

func (ls *locSet) MoveItem(fromLocation, toLocation, ir string) {
	ls.RemoveItem(fromLocation, ir)
	ls.AddItem(toLocation, ir)
}

func (ls *locSet) AddLocation(locationRef string) {
	if _, ok := ls.Locations[locationRef]; !ok {
		ls.Locations[locationRef] = map[string]struct{}{}
	}
	// don't purge previously deleted items, or child locations.
	// Assumption is that their itemRef is unique, and still deleted.
	delete(ls.Deleted, locationRef)
}

func (ls *locSet) RemoveLocation(locationRef string) {
	ss := ls.Subset(locationRef)

	for lr := range ss.Locations {
		items := ls.Locations[lr]

		delete(ls.Locations, lr)

		if _, ok := ls.Deleted[lr]; !ok {
			ls.Deleted[lr] = map[string]struct{}{}
		}

		for ir := range items {
			ls.Deleted[lr][ir] = exists
		}
	}
}
// MoveLocation takes the LAST element in the fromLocation (and all
// children matching the prefix), and relocates it as a child of toLocation.
// ex: MoveLocation("/a/b/c", "/d") will move all entries with the prefix
// "/a/b/c" into "/d/c". This also deletes all "/a/b/c" entries and children.
// Assumes item IDs don't change across the migration. If item IDs do change,
// that difference will need to be handled manually by the caller.
// Returns the base folder's new location (ex: /d/c).
func (ls *locSet) MoveLocation(fromLocation, toLocation string) string {
	fromBuilder := path.Builder{}.Append(path.Split(fromLocation)...)
	toBuilder := path.Builder{}.Append(path.Split(toLocation)...).Append(fromBuilder.LastElem())

	ls.RenameLocation(fromBuilder.String(), toBuilder.String())

	return toBuilder.String()
}
func (ls *locSet) RenameLocation(fromLocation, toLocation string) {
	ss := ls.Subset(fromLocation)
	fromBuilder := path.Builder{}.Append(path.Split(fromLocation)...)
	toBuilder := path.Builder{}.Append(path.Split(toLocation)...)

	for lr, items := range ss.Locations {
		lrBuilder := path.Builder{}.Append(path.Split(lr)...)
		lrBuilder.UpdateParent(fromBuilder, toBuilder)

		newLoc := lrBuilder.String()

		for ir := range items {
			ls.RemoveItem(lr, ir)
			ls.AddItem(newLoc, ir)
		}

		ls.RemoveLocation(lr)
		ls.AddLocation(newLoc)
	}
}

// Subset produces a new locSet containing only Items and Locations
// whose location matches the locationPfx
func (ls *locSet) Subset(locationPfx string) *locSet {
	ss := newLocSet()

	for lr, items := range ls.Locations {
		if strings.HasPrefix(lr, locationPfx) {
			ss.AddLocation(lr)

			for ir := range items {
				ss.AddItem(lr, ir)
			}
		}
	}

	return ss
}

// ---------------------------------------------------------------------------
// The goal of InDeets is to provide a struct and interface which allows
// tests to predict not just the elements within a set of details entries,
// but also their changes (relocation, renaming, etc) in a way that consolidates
// building an "expected set" of details entries that can be compared against
// the details results after a backup.
// ---------------------------------------------------------------------------

// InDeets is a helper for comparing details state in tests
// across backup instances.
type InDeets struct {
	// only: tenantID/service/resourceOwnerID
	RRPrefix string
	// map of container setting the uniqueness boundary for location
	// ref entries (eg, data type like email, contacts, etc, or
	// drive id) to the unique entries in that set.
	Sets map[string]*locSet
}

func NewInDeets(repoRefPrefix string) *InDeets {
	return &InDeets{
		RRPrefix: repoRefPrefix,
		Sets:     map[string]*locSet{},
	}
}
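The set key is whatever boundary keeps locationRefs unique for the service under test. Per the struct comment, exchange tests might key by data category while onedrive keys by drive; a sketch assuming those keys:

```go
// in package testdata
exch := NewInDeets("tenantID/exchange/resourceOwnerID")
exch.AddItem("email", "Inbox/Archive", "mailID")    // one set per data category
exch.AddItem("contacts", "Contacts", "contactID")

od := NewInDeets("tenantID/onedrive/resourceOwnerID")
od.AddItem("driveID-1", "root/folderA", "fileID")   // one set per drive
```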
func (id *InDeets) getSet(set string) *locSet {
	s, ok := id.Sets[set]
	if ok {
		return s
	}

	// lazily create and retain the set, so that later additions
	// and comparisons operate on the same instance.
	s = newLocSet()
	id.Sets[set] = s

	return s
}

func (id *InDeets) AddAll(deets details.Details, ws whatSet) {
	if id.Sets == nil {
		id.Sets = map[string]*locSet{}
	}

	for _, ent := range deets.Entries {
		set, err := ws(ent)
		if err != nil {
			set = err.Error()
		}

		dir := ent.LocationRef

		if ent.Folder != nil {
			dir = dir + ent.Folder.DisplayName
			id.AddLocation(set, dir)
		} else {
			id.AddItem(set, ent.LocationRef, ent.ItemRef)
		}
	}
}

func (id *InDeets) AddItem(set, locationRef, itemRef string) {
	id.getSet(set).AddItem(locationRef, itemRef)
}

func (id *InDeets) RemoveItem(set, locationRef, itemRef string) {
	id.getSet(set).RemoveItem(locationRef, itemRef)
}

func (id *InDeets) MoveItem(set, fromLocation, toLocation, ir string) {
	id.getSet(set).MoveItem(fromLocation, toLocation, ir)
}

func (id *InDeets) AddLocation(set, locationRef string) {
	id.getSet(set).AddLocation(locationRef)
}

// RemoveLocation removes the provided location, and all children
// of that location.
func (id *InDeets) RemoveLocation(set, locationRef string) {
	id.getSet(set).RemoveLocation(locationRef)
}
// MoveLocation takes the LAST element in the fromLocation (and all
// children matching the prefix), and relocates it as a child of toLocation.
// ex: MoveLocation("/a/b/c", "/d") will move all entries with the prefix
// "/a/b/c" into "/d/c". This also deletes all "/a/b/c" entries and children.
// Assumes item IDs don't change across the migration. If item IDs do change,
// that difference will need to be handled manually by the caller.
// Returns the base folder's new location (ex: /d/c).
func (id *InDeets) MoveLocation(set, fromLocation, toLocation string) string {
	return id.getSet(set).MoveLocation(fromLocation, toLocation)
}

func (id *InDeets) RenameLocation(set, fromLocation, toLocation string) {
	id.getSet(set).RenameLocation(fromLocation, toLocation)
}

// Subset produces a new locSet containing only Items and Locations
// whose location matches the locationPfx
func (id *InDeets) Subset(set, locationPfx string) *locSet {
	return id.getSet(set).Subset(locationPfx)
}

// ---------------------------------------------------------------------------
// whatSet helpers for extracting a set identifier from an arbitrary repoRef
// ---------------------------------------------------------------------------

type whatSet func(details.Entry) (string, error)

// common whatSet parser that extracts the service category from
// a repoRef.
func CategoryFromRepoRef(ent details.Entry) (string, error) {
	p, err := path.FromDataLayerPath(ent.RepoRef, false)
	if err != nil {
		return "", err
	}

	return p.Category().String(), nil
}

// common whatSet parser that extracts the driveID from a repoRef.
func DriveIDFromRepoRef(ent details.Entry) (string, error) {
	p, err := path.FromDataLayerPath(ent.RepoRef, false)
	if err != nil {
		return "", err
	}

	odp, err := path.ToDrivePath(p)
	if err != nil {
		return "", err
	}

	return odp.DriveID, nil
}
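Any function with this shape works as a whatSet; the two parsers above cover the common category and drive groupings. A test that wants to bucket entries by resource owner could lean on the same path parsing (illustrative only, not part of this change):

```go
// in package testdata (hypothetical helper)
func OwnerFromRepoRef(ent details.Entry) (string, error) {
	p, err := path.FromDataLayerPath(ent.RepoRef, false)
	if err != nil {
		return "", err
	}

	return p.ResourceOwner(), nil
}
```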
// ---------------------------------------------------------------------------
// helpers and comparators
// ---------------------------------------------------------------------------

func CheckBackupDetails(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	backupID model.StableID,
	ws whatSet,
	ms *kopia.ModelStore,
	ssr streamstore.Reader,
	expect *InDeets,
	// standard check is assert.Subset due to issues of external data cross-
	// pollination. This should be true if the backup contains a unique directory
	// of data.
	mustEqualFolders bool,
) {
	deets, result := GetDeetsInBackup(t, ctx, backupID, "", "", path.UnknownService, ws, ms, ssr)

	t.Log("details entries in result")

	for _, ent := range deets.Entries {
		if ent.Folder == nil {
			t.Log(ent.LocationRef)
			t.Log(ent.ItemRef)
		}

		assert.Truef(
			t,
			strings.HasPrefix(ent.RepoRef, expect.RRPrefix),
			"all details should begin with the expected prefix\nwant: %s\ngot: %s",
			expect.RRPrefix, ent.RepoRef)
	}

	for set := range expect.Sets {
		check := assert.Subsetf

		if mustEqualFolders {
			check = assert.ElementsMatchf
		}

		check(
			t,
			maps.Keys(result.Sets[set].Locations),
			maps.Keys(expect.Sets[set].Locations),
			"results in %s missing expected location", set)

		for lr, items := range expect.Sets[set].Deleted {
			_, ok := result.Sets[set].Locations[lr]
			assert.Falsef(t, ok, "deleted location in %s found in result: %s", set, lr)

			for ir := range items {
				_, ok := result.Sets[set].Locations[lr][ir]
				assert.Falsef(t, ok, "deleted item in %s found in result: %s", set, lr)
			}
		}
	}
}

func GetDeetsInBackup(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	backupID model.StableID,
	tid, resourceOwner string,
	service path.ServiceType,
	ws whatSet,
	ms *kopia.ModelStore,
	ssr streamstore.Reader,
) (details.Details, *InDeets) {
	bup := backup.Backup{}

	err := ms.Get(ctx, model.BackupSchema, backupID, &bup)
	require.NoError(t, err, clues.ToCore(err))

	ssid := bup.StreamStoreID
	require.NotEmpty(t, ssid, "stream store ID")

	var deets details.Details
	err = ssr.Read(
		ctx,
		ssid,
		streamstore.DetailsReader(details.UnmarshalTo(&deets)),
		fault.New(true))
	require.NoError(t, err, clues.ToCore(err))

	id := NewInDeets(path.Builder{}.Append(tid, service.String(), resourceOwner).String())
	id.AddAll(deets, ws)

	return deets, id
}
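In a backup test the comparison then reduces to one call after the operation completes. The final flag picks the comparator described above: subset when external data may share the container, exact match when the backup covers a unique, test-owned directory.

```go
// subset check: tolerates pre-existing data alongside the test's entries
deeTD.CheckBackupDetails(t, ctx, bupID, ws, ms, ssr, expect, false)

// exact folder match: safe when the scope is a unique test directory
deeTD.CheckBackupDetails(t, ctx, bupID, ws, ms, ssr, expect, true)
```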
src/pkg/backup/details/testdata/in_deets_test.go (new file, vendored)
@@ -0,0 +1,445 @@
package testdata

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"
	"golang.org/x/exp/maps"

	"github.com/alcionai/corso/src/internal/tester"
)

type LocSetUnitSuite struct {
	tester.Suite
}

func TestLocSetUnitSuite(t *testing.T) {
	suite.Run(t, &LocSetUnitSuite{Suite: tester.NewUnitSuite(t)})
}

const (
	l1  = "lr_1"
	l2  = "lr_2"
	l13 = "lr_1/lr_3"
	l14 = "lr_1/lr_4"
	i1  = "ir_1"
	i2  = "ir_2"
	i3  = "ir_3"
	i4  = "ir_4"
)

func (suite *LocSetUnitSuite) TestAdd() {
	t := suite.T()

	ls := newLocSet()

	ls.AddItem(l1, i1)
	ls.AddLocation(l2)

	assert.ElementsMatch(t, []string{l1, l2}, maps.Keys(ls.Locations))
	assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1]))
	assert.Empty(t, maps.Keys(ls.Locations[l2]))
	assert.Empty(t, maps.Keys(ls.Locations[l13]))
}

func (suite *LocSetUnitSuite) TestRemove() {
	t := suite.T()

	ls := newLocSet()

	ls.AddItem(l1, i1)
	ls.AddItem(l1, i2)
	ls.AddLocation(l13)
	ls.AddItem(l14, i3)
	ls.AddItem(l14, i4)

	assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))
	assert.Empty(t, maps.Keys(ls.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[l14]))

	// nop removal
	ls.RemoveItem(l2, i1)
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))

	// item removal
	ls.RemoveItem(l1, i2)
	assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1]))

	// nop location removal
	ls.RemoveLocation(l2)
	assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations))

	// non-cascading location removal
	ls.RemoveLocation(l13)
	assert.ElementsMatch(t, []string{l1, l14}, maps.Keys(ls.Locations))
	assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l1]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[l14]))

	// cascading location removal
	ls.RemoveLocation(l1)
	assert.Empty(t, maps.Keys(ls.Locations))
	assert.Empty(t, maps.Keys(ls.Locations[l1]))
	assert.Empty(t, maps.Keys(ls.Locations[l13]))
	assert.Empty(t, maps.Keys(ls.Locations[l14]))
}

func (suite *LocSetUnitSuite) TestSubset() {
	ls := newLocSet()

	ls.AddItem(l1, i1)
	ls.AddItem(l1, i2)
	ls.AddLocation(l13)
	ls.AddItem(l14, i3)
	ls.AddItem(l14, i4)

	table := []struct {
		name   string
		locPfx string
		expect func(*testing.T, *locSet)
	}{
		{
			name:   "nop",
			locPfx: l2,
			expect: func(t *testing.T, ss *locSet) {
				assert.Empty(t, maps.Keys(ss.Locations))
			},
		},
		{
			name:   "no items",
			locPfx: l13,
			expect: func(t *testing.T, ss *locSet) {
				assert.ElementsMatch(t, []string{l13}, maps.Keys(ss.Locations))
				assert.Empty(t, maps.Keys(ss.Locations[l13]))
			},
		},
		{
			name:   "non-cascading",
			locPfx: l14,
			expect: func(t *testing.T, ss *locSet) {
				assert.ElementsMatch(t, []string{l14}, maps.Keys(ss.Locations))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ss.Locations[l14]))
			},
		},
		{
			name:   "cascading",
			locPfx: l1,
			expect: func(t *testing.T, ss *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ss.Locations))
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ss.Locations[l1]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ss.Locations[l14]))
				assert.Empty(t, maps.Keys(ss.Locations[l13]))
			},
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			test.expect(t, ls.Subset(test.locPfx))
		})
	}
}

func (suite *LocSetUnitSuite) TestRename() {
	t := suite.T()

	makeSet := func() *locSet {
		ls := newLocSet()

		ls.AddItem(l1, i1)
		ls.AddItem(l1, i2)
		ls.AddLocation(l13)
		ls.AddItem(l14, i3)
		ls.AddItem(l14, i4)

		return ls
	}

	ts := makeSet()
	assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ts.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1]))
	assert.Empty(t, maps.Keys(ts.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14]))

	table := []struct {
		name   string
		from   string
		to     string
		expect func(*testing.T, *locSet)
	}{
		{
			name: "nop",
			from: l2,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l2]))
				assert.Empty(t, maps.Keys(ls.Locations["foo"]))
			},
		},
		{
			name: "no items",
			from: l13,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, "foo", l14}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.Empty(t, maps.Keys(ls.Locations["foo"]))
			},
		},
		{
			name: "with items",
			from: l14,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, "foo"}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations["foo"]))
			},
		},
		{
			name: "cascading locations",
			from: l1,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{"foo", "foo/lr_3", "foo/lr_4"}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l1]))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations["foo"]))
				assert.Empty(t, maps.Keys(ls.Locations["foo/lr_3"]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations["foo/lr_4"]))
			},
		},
		{
			name: "to existing location",
			from: l14,
			to:   l1,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.ElementsMatch(t, []string{i1, i2, i3, i4}, maps.Keys(ls.Locations[l1]))
			},
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			ls := makeSet()

			ls.RenameLocation(test.from, test.to)
			test.expect(t, ls)
		})
	}
}

func (suite *LocSetUnitSuite) TestItem() {
	t := suite.T()
	b4 := "bar/lr_4"

	makeSet := func() *locSet {
		ls := newLocSet()

		ls.AddItem(l1, i1)
		ls.AddItem(l1, i2)
		ls.AddLocation(l13)
		ls.AddItem(l14, i3)
		ls.AddItem(l14, i4)
		ls.AddItem(b4, "fnord")

		return ls
	}

	ts := makeSet()
	assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ts.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1]))
	assert.Empty(t, maps.Keys(ts.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14]))
	assert.ElementsMatch(t, []string{"fnord"}, maps.Keys(ts.Locations[b4]))

	table := []struct {
		name   string
		item   string
		from   string
		to     string
		expect func(*testing.T, *locSet)
	}{
		{
			name: "nop item",
			item: "floob",
			from: l2,
			to:   l1,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i1, i2, "floob"}, maps.Keys(ls.Locations[l1]))
				assert.Empty(t, maps.Keys(ls.Locations[l2]))
			},
		},
		{
			name: "nop origin",
			item: i1,
			from: "smarf",
			to:   l2,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))
				assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l2]))
				assert.Empty(t, maps.Keys(ls.Locations["smarf"]))
			},
		},
		{
			name: "new location",
			item: i1,
			from: l1,
			to:   "fnords",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i2}, maps.Keys(ls.Locations[l1]))
				assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations["fnords"]))
			},
		},
		{
			name: "existing location",
			item: i1,
			from: l1,
			to:   l2,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i2}, maps.Keys(ls.Locations[l1]))
				assert.ElementsMatch(t, []string{i1}, maps.Keys(ls.Locations[l2]))
			},
		},
		{
			name: "same location",
			item: i1,
			from: l1,
			to:   l1,
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[l1]))
			},
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			ls := makeSet()

			ls.MoveItem(test.from, test.to, test.item)
			test.expect(t, ls)
		})
	}
}

func (suite *LocSetUnitSuite) TestMoveLocation() {
	t := suite.T()
	b4 := "bar/lr_4"

	makeSet := func() *locSet {
		ls := newLocSet()

		ls.AddItem(l1, i1)
		ls.AddItem(l1, i2)
		ls.AddLocation(l13)
		ls.AddItem(l14, i3)
		ls.AddItem(l14, i4)
		ls.AddItem(b4, "fnord")

		return ls
	}

	ts := makeSet()
	assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ts.Locations))
	assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ts.Locations[l1]))
	assert.Empty(t, maps.Keys(ts.Locations[l13]))
	assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ts.Locations[l14]))
	assert.ElementsMatch(t, []string{"fnord"}, maps.Keys(ts.Locations[b4]))

	table := []struct {
		name         string
		from         string
		to           string
		expect       func(*testing.T, *locSet)
		expectNewLoc string
	}{
		{
			name: "nop root",
			from: l2,
			to:   "",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l2]))
			},
			expectNewLoc: l2,
		},
		{
			name: "nop child",
			from: l2,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations["foo"]))
				assert.Empty(t, maps.Keys(ls.Locations["foo/"+l2]))
			},
			expectNewLoc: "foo/" + l2,
		},
		{
			name: "no items",
			from: l13,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				newLoc := "foo/lr_3"
				assert.ElementsMatch(t, []string{l1, newLoc, l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.Empty(t, maps.Keys(ls.Locations[newLoc]))
			},
			expectNewLoc: "foo/lr_3",
		},
		{
			name: "with items",
			from: l14,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				newLoc := "foo/lr_4"
				assert.ElementsMatch(t, []string{l1, l13, newLoc, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[newLoc]))
			},
			expectNewLoc: "foo/lr_4",
		},
		{
			name: "cascading locations",
			from: l1,
			to:   "foo",
			expect: func(t *testing.T, ls *locSet) {
				pfx := "foo/"
				assert.ElementsMatch(t, []string{pfx + l1, pfx + l13, pfx + l14, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l1]))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.Empty(t, maps.Keys(ls.Locations[l13]))
				assert.ElementsMatch(t, []string{i1, i2}, maps.Keys(ls.Locations[pfx+l1]))
				assert.Empty(t, maps.Keys(ls.Locations[pfx+l13]))
				assert.ElementsMatch(t, []string{i3, i4}, maps.Keys(ls.Locations[pfx+l14]))
			},
			expectNewLoc: "foo/" + l1,
		},
		{
			name: "to existing location",
			from: l14,
			to:   "bar",
			expect: func(t *testing.T, ls *locSet) {
				assert.ElementsMatch(t, []string{l1, l13, b4}, maps.Keys(ls.Locations))
				assert.Empty(t, maps.Keys(ls.Locations[l14]))
				assert.Empty(t, maps.Keys(ls.Locations["bar"]))
				assert.ElementsMatch(t, []string{"fnord", i3, i4}, maps.Keys(ls.Locations[b4]))
			},
			expectNewLoc: b4,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			ls := makeSet()

			newLoc := ls.MoveLocation(test.from, test.to)
			test.expect(t, ls)
			assert.Equal(t, test.expectNewLoc, newLoc)
		})
	}
}
@@ -85,7 +85,7 @@ type Path interface {
 	Category() CategoryType
 	Tenant() string
 	ResourceOwner() string
-	Folder(bool) string
+	Folder(escaped bool) string
 	Folders() Elements
 	Item() string
 	// UpdateParent updates parent from old to new if the item/folder was