Fixups for comparing details to expected set (#3778)

Collection of fixes to get tests passing again, including:
* ensure expected sets are tracked for the lifetime of the struct (sketched below)
* add a check that fails if there are no expected deets to compare against
* minor fixups to path building when reading in an entire details file
* trim the data sent for patch operations down to only the fields that changed (sketched below)
* update how expected items are gathered -- just pull them from graph directly
* update expected written items for OneDrive -- most likely due to matching both test folders (e.g. `corso_incr_test_1` and `corso_incr_test_1/corso_incr_test_1`)
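
For the set-lifetime bullet, the fix boils down to lazily storing a missing set back into the map instead of returning a throwaway value. A minimal sketch, assuming simplified stand-in types for the `InDeets`/`locSet` helpers (the real change is in the third file of the diff):

```go
package deetstd

// locSet is a simplified stand-in for the helper's per-set location index.
type locSet struct {
	Locations map[string]map[string]struct{}
}

func newLocSet() *locSet {
	return &locSet{Locations: map[string]map[string]struct{}{}}
}

// inDeets is a simplified stand-in for the expected-details tracker.
type inDeets struct {
	Sets map[string]*locSet
}

// getSet returns the set for the given key, creating and storing it on
// first use. Previously a miss returned a fresh locSet that was never
// written back to Sets, so caller additions were silently dropped;
// storing it keeps the set alive for the lifetime of the struct.
func (id *inDeets) getSet(set string) *locSet {
	s, ok := id.Sets[set]
	if !ok {
		s = newLocSet()
		id.Sets[set] = s
	}
	return s
}
```

For the patch-trimming bullet, the same before/after shape repeats for mail, contacts, and events; here is the mail case untangled from the interleaved diff lines below (all identifiers come from the test itself, not new API):

```go
// Before: fetch the whole folder, change one field, PATCH the entire body back.
//   body, err := ac.Mail().GetFolder(ctx, uidn.ID(), containerID)
//   body.SetDisplayName(ptr.To(containerRename))
//   err = ac.Mail().PatchFolder(ctx, uidn.ID(), containerID, body)

// After: send a fresh model carrying only the changed field.
body := models.NewMailFolder()
body.SetDisplayName(&containerRename)
err := ac.Mail().PatchFolder(ctx, uidn.ID(), containerID, body)
require.NoError(t, err, clues.ToCore(err))
```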

---

#### Does this PR need a docs update or release note?

- [ ] Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] No

#### Type of change

- [ ] 🌻 Feature
- [x] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [x] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* closes #3777

#### Test Plan

- [x] 💪 Manual
- [ ] Unit test
- [ ] 💚 E2E
Commit 87b8631037 (parent 232050fb44), authored by ashmrtn on 2023-07-10 16:46:13 -07:00 and committed via GitHub.
3 changed files with 306 additions and 249 deletions

File 1 of 3: Exchange continuous-backup integration tests (package `test_test`)

@@ -3,10 +3,10 @@ package test_test
import (
"context"
"fmt"
"strings"
"testing"
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/users"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -21,6 +21,7 @@ import (
"github.com/alcionai/corso/src/internal/m365/exchange"
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
exchTD "github.com/alcionai/corso/src/internal/m365/exchange/testdata"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/resource"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
@@ -343,10 +344,90 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
// },
}
rrPfx, err := path.ServicePrefix(acct.ID(), uidn.ID(), service, path.EmailCategory)
require.NoError(t, err, clues.ToCore(err))
// strip the category from the prefix; we primarily want the tenant and resource owner.
expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String())
mustGetExpectedContainerItems := func(
t *testing.T,
category path.CategoryType,
cr graph.ContainerResolver,
destName string,
) {
locRef := path.Builder{}.Append(destName)
if category == path.EmailCategory {
locRef = locRef.Append(destName)
}
containerID, ok := cr.LocationInCache(locRef.String())
require.True(t, ok, "dir %s found in %s cache", locRef.String(), category)
var (
err error
items []string
)
switch category {
case path.EmailCategory:
items, _, _, err = ac.Mail().GetAddedAndRemovedItemIDs(
ctx,
uidn.ID(),
containerID,
"",
toggles.ExchangeImmutableIDs,
true)
case path.EventsCategory:
items, _, _, err = ac.Events().GetAddedAndRemovedItemIDs(
ctx,
uidn.ID(),
containerID,
"",
toggles.ExchangeImmutableIDs,
true)
case path.ContactsCategory:
items, _, _, err = ac.Contacts().GetAddedAndRemovedItemIDs(
ctx,
uidn.ID(),
containerID,
"",
toggles.ExchangeImmutableIDs,
true)
}
require.NoError(
t,
err,
"getting items for category %s, container %s",
category,
locRef.String())
dest := dataset[category].dests[destName]
dest.locRef = locRef.String()
dest.containerID = containerID
dest.itemRefs = items
dataset[category].dests[destName] = dest
// Add the directory and all its ancestors to the cache so we can compare
// folders.
for len(locRef.Elements()) > 0 {
expectDeets.AddLocation(category.String(), locRef.String())
locRef = locRef.Dir()
}
for _, i := range dataset[category].dests[destName].itemRefs {
expectDeets.AddItem(category.String(), dest.locRef, i)
}
}
// populate initial test data
for category, gen := range dataset {
for destName := range gen.dests {
deets := generateContainerOfItems(
generateContainerOfItems(
t,
ctx,
ctrl,
@@ -360,23 +441,18 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
2,
version.Backup,
gen.dbf)
itemRefs := []string{}
for _, ent := range deets.Entries {
if ent.Exchange == nil || ent.Folder != nil {
continue
}
if len(ent.ItemRef) > 0 {
itemRefs = append(itemRefs, ent.ItemRef)
}
}
cr := exchTD.PopulateContainerCache(
t,
ctx,
ac,
category,
uidn.ID(),
fault.New(true))
// save the item ids for building expectedDeets later on
cd := dataset[category].dests[destName]
cd.itemRefs = itemRefs
dataset[category].dests[destName] = cd
for destName := range gen.dests {
mustGetExpectedContainerItems(t, category, cr, destName)
}
}
@@ -386,82 +462,6 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
// run the initial backup
runAndCheckBackup(t, ctx, &bo, mb, false)
rrPfx, err := path.ServicePrefix(acct.ID(), uidn.ID(), service, path.EmailCategory)
require.NoError(t, err, clues.ToCore(err))
// strip the category from the prefix; we primarily want the tenant and resource owner.
expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String())
bupDeets, _ := deeTD.GetDeetsInBackup(
t,
ctx,
bo.Results.BackupID,
acct.ID(),
uidn.ID(),
service,
whatSet,
bod.kms,
bod.sss)
// update the datasets with their location refs
for category, gen := range dataset {
for destName, cd := range gen.dests {
var longestLR string
for _, ent := range bupDeets.Entries {
// generated destinations should always contain items
if ent.Folder != nil {
continue
}
p, err := path.FromDataLayerPath(ent.RepoRef, false)
require.NoError(t, err, clues.ToCore(err))
// category must match, and the owning folder must be this destination
if p.Category() != category || strings.HasSuffix(ent.LocationRef, destName) {
continue
}
// emails, due to folder nesting and our design for populating data via restore,
// will duplicate the dest folder as both the restore destination, and the "old parent
// folder". we'll get both a prefix/destName and a prefix/destName/destName folder.
// since we want future comparison to only use the leaf dir, we select for the longest match.
if len(ent.LocationRef) > len(longestLR) {
longestLR = ent.LocationRef
}
}
require.NotEmptyf(
t,
longestLR,
"must find a details entry matching the generated %s container: %s",
category,
destName)
cd.locRef = longestLR
dataset[category].dests[destName] = cd
expectDeets.AddLocation(category.String(), cd.locRef)
for _, i := range dataset[category].dests[destName].itemRefs {
expectDeets.AddItem(category.String(), cd.locRef, i)
}
}
}
// verify test data was populated, and track it for comparisons
// TODO: this can be swapped out for InDeets checks if we add itemRefs to folder ents.
for category, gen := range dataset {
cr := exchTD.PopulateContainerCache(t, ctx, ac, category, uidn.ID(), fault.New(true))
for destName, dest := range gen.dests {
id, ok := cr.LocationInCache(dest.locRef)
require.True(t, ok, "dir %s found in %s cache", dest.locRef, category)
dest.containerID = id
dataset[category].dests[destName] = dest
}
}
// precheck to ensure the expectedDeets are correct.
// if we fail here, the expectedDeets were populated incorrectly.
deeTD.CheckBackupDetails(
@@ -514,7 +514,22 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
require.NoError(t, err, clues.ToCore(err))
newLoc := expectDeets.MoveLocation(cat.String(), from.locRef, to.locRef)
// Remove ancestor folders of the moved directory since they'll no longer
// appear in details; we're not backing up items in them.
pb, err := path.Builder{}.SplitUnescapeAppend(from.locRef)
require.NoError(t, err, "getting Builder for location: %s", clues.ToCore(err))
pb = pb.Dir()
for len(pb.Elements()) > 0 {
expectDeets.RemoveLocation(cat.String(), pb.String())
pb = pb.Dir()
}
// Update cache with new location of container.
from.locRef = newLoc
dataset[cat].dests[container2] = from
},
deltaItemsRead: 0, // zero because we don't count container reads
deltaItemsWritten: 2,
@@ -553,7 +568,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
name: "add a new folder",
updateUserData: func(t *testing.T, ctx context.Context) {
for category, gen := range dataset {
deets := generateContainerOfItems(
generateContainerOfItems(
t,
ctx,
ctrl,
@@ -565,27 +580,8 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
version.Backup,
gen.dbf)
expectedLocRef := container3
if category == path.EmailCategory {
expectedLocRef = path.Builder{}.Append(container3, container3).String()
}
cr := exchTD.PopulateContainerCache(t, ctx, ac, category, uidn.ID(), fault.New(true))
id, ok := cr.LocationInCache(expectedLocRef)
require.Truef(t, ok, "dir %s found in %s cache", expectedLocRef, category)
dataset[category].dests[container3] = contDeets{
containerID: id,
locRef: expectedLocRef,
itemRefs: nil, // not needed at this point
}
for _, ent := range deets.Entries {
if ent.Folder == nil {
expectDeets.AddItem(category.String(), expectedLocRef, ent.ItemRef)
}
}
mustGetExpectedContainerItems(t, category, cr, container3)
}
},
deltaItemsRead: 4,
@@ -612,31 +608,28 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
expectDeets.RenameLocation(
category.String(),
d.dests[container3].containerID,
d.dests[container3].locRef,
newLoc)
switch category {
case path.EmailCategory:
body, err := ac.Mail().GetFolder(ctx, uidn.ID(), containerID)
require.NoError(t, err, clues.ToCore(err))
body := models.NewMailFolder()
body.SetDisplayName(ptr.To(containerRename))
body.SetDisplayName(&containerRename)
err = ac.Mail().PatchFolder(ctx, uidn.ID(), containerID, body)
err := ac.Mail().PatchFolder(ctx, uidn.ID(), containerID, body)
require.NoError(t, err, clues.ToCore(err))
case path.ContactsCategory:
body, err := ac.Contacts().GetFolder(ctx, uidn.ID(), containerID)
require.NoError(t, err, clues.ToCore(err))
body := models.NewContactFolder()
body.SetDisplayName(ptr.To(containerRename))
body.SetDisplayName(&containerRename)
err = ac.Contacts().PatchFolder(ctx, uidn.ID(), containerID, body)
require.NoError(t, err, clues.ToCore(err))
case path.EventsCategory:
body, err := ac.Events().GetCalendar(ctx, uidn.ID(), containerID)
require.NoError(t, err, clues.ToCore(err))
body := models.NewCalendar()
body.SetName(ptr.To(containerRename))
body.SetName(&containerRename)
err = ac.Events().PatchCalendar(ctx, uidn.ID(), containerID, body)
require.NoError(t, err, clues.ToCore(err))
}
@@ -667,7 +660,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
expectDeets.AddItem(
category.String(),
d.dests[category.String()].locRef,
d.dests[container1].locRef,
ptr.Val(itm.GetId()))
case path.ContactsCategory:
@@ -680,7 +673,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
expectDeets.AddItem(
category.String(),
d.dests[category.String()].locRef,
d.dests[container1].locRef,
ptr.Val(itm.GetId()))
case path.EventsCategory:
@@ -693,7 +686,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
expectDeets.AddItem(
category.String(),
d.dests[category.String()].locRef,
d.dests[container1].locRef,
ptr.Val(itm.GetId()))
}
}
@@ -708,48 +701,29 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
name: "delete an existing item",
updateUserData: func(t *testing.T, ctx context.Context) {
for category, d := range dataset {
containerID := d.dests[container1].containerID
containerInfo := d.dests[container1]
require.NotEmpty(t, containerInfo.itemRefs)
id := containerInfo.itemRefs[0]
switch category {
case path.EmailCategory:
ids, _, _, err := ac.Mail().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), containerID, "", false, true)
require.NoError(t, err, "getting message ids", clues.ToCore(err))
require.NotEmpty(t, ids, "message ids in folder")
err = ac.Mail().DeleteItem(ctx, uidn.ID(), ids[0])
require.NoError(t, err, "deleting email item", clues.ToCore(err))
expectDeets.RemoveItem(
category.String(),
d.dests[category.String()].locRef,
ids[0])
err = ac.Mail().DeleteItem(ctx, uidn.ID(), id)
case path.ContactsCategory:
ids, _, _, err := ac.Contacts().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), containerID, "", false, true)
require.NoError(t, err, "getting contact ids", clues.ToCore(err))
require.NotEmpty(t, ids, "contact ids in folder")
err = ac.Contacts().DeleteItem(ctx, uidn.ID(), ids[0])
require.NoError(t, err, "deleting contact item", clues.ToCore(err))
expectDeets.RemoveItem(
category.String(),
d.dests[category.String()].locRef,
ids[0])
err = ac.Contacts().DeleteItem(ctx, uidn.ID(), id)
case path.EventsCategory:
ids, _, _, err := ac.Events().GetAddedAndRemovedItemIDs(ctx, uidn.ID(), containerID, "", false, true)
require.NoError(t, err, "getting event ids", clues.ToCore(err))
require.NotEmpty(t, ids, "event ids in folder")
err = ac.Events().DeleteItem(ctx, uidn.ID(), ids[0])
require.NoError(t, err, "deleting calendar", clues.ToCore(err))
expectDeets.RemoveItem(
category.String(),
d.dests[category.String()].locRef,
ids[0])
err = ac.Events().DeleteItem(ctx, uidn.ID(), id)
}
require.NoError(
t,
err,
"deleting %s item: %s",
category.String(),
clues.ToCore(err))
expectDeets.RemoveItem(category.String(), containerInfo.locRef, id)
}
},
deltaItemsRead: 2,

File 2 of 3: OneDrive incremental backup tests (`runDriveIncrementalTest`)

@@ -204,9 +204,9 @@ func runDriveIncrementalTest(
fileDBF = func(id, timeStamp, subject, body string) []byte {
return []byte(id + subject)
}
makeLocRef = func(flds ...string) string {
elems := append([]string{driveID, "root:"}, flds...)
return path.Builder{}.Append(elems...).String()
makeLocRef = func(flds ...string) *path.Builder {
elems := append([]string{"root:"}, flds...)
return path.Builder{}.Append(elems...)
}
)
@@ -216,6 +216,68 @@ func runDriveIncrementalTest(
// strip the category from the prefix; we primarily want the tenant and resource owner.
expectDeets := deeTD.NewInDeets(rrPfx.ToBuilder().Dir().String())
type containerInfo struct {
id string
locRef *path.Builder
}
containerInfos := map[string]containerInfo{}
mustGetExpectedContainerItems := func(
t *testing.T,
driveID, destName string,
locRef *path.Builder,
) {
// Use path-based indexing to get the folder's ID.
itemURL := fmt.Sprintf(
"https://graph.microsoft.com/v1.0/drives/%s/root:/%s",
driveID,
locRef.String())
resp, err := drives.
NewItemItemsDriveItemItemRequestBuilder(itemURL, ctrl.AC.Stable.Adapter()).
Get(ctx, nil)
require.NoError(
t,
err,
"getting drive folder ID for %s: %v",
locRef.String(),
clues.ToCore(err))
containerInfos[destName] = containerInfo{
id: ptr.Val(resp.GetId()),
locRef: makeLocRef(locRef.Elements()...),
}
dest := containerInfos[destName]
items, err := ac.GetItemsInContainerByCollisionKey(
ctx,
driveID,
ptr.Val(resp.GetId()))
require.NoError(
t,
err,
"getting container %s items: %v",
locRef.String(),
clues.ToCore(err))
// Add the directory and all its ancestors to the cache so we can compare
// folders.
for pb := dest.locRef; len(pb.Elements()) > 0; pb = pb.Dir() {
expectDeets.AddLocation(driveID, pb.String())
}
for _, item := range items {
if item.IsFolder {
continue
}
expectDeets.AddItem(
driveID,
dest.locRef.String(),
item.ItemID)
}
}
// Populate initial test data.
// Generate 2 new folders with two items each. Only the first two
// folders will be part of the initial backup and
@@ -223,7 +285,7 @@ func runDriveIncrementalTest(
// through the changes. This should be enough to cover most delta
// actions.
for _, destName := range genDests {
deets := generateContainerOfItems(
generateContainerOfItems(
t,
ctx,
ctrl,
@@ -236,28 +298,13 @@ func runDriveIncrementalTest(
0,
fileDBF)
for _, ent := range deets.Entries {
if ent.Folder != nil {
continue
}
expectDeets.AddItem(driveID, makeLocRef(destName), ent.ItemRef)
}
}
containerIDs := map[string]string{}
// verify test data was populated, and track it for comparisons
for _, destName := range genDests {
// Use path-based indexing to get the folder's ID. This is sourced from the
// onedrive package `getFolder` function.
itemURL := fmt.Sprintf("https://graph.microsoft.com/v1.0/drives/%s/root:/%s", driveID, destName)
resp, err := drives.
NewItemItemsDriveItemItemRequestBuilder(itemURL, ctrl.AC.Stable.Adapter()).
Get(ctx, nil)
require.NoError(t, err, "getting drive folder ID", "folder name", destName, clues.ToCore(err))
containerIDs[destName] = ptr.Val(resp.GetId())
// The way we generate containers causes it to duplicate the destName.
locRef := path.Builder{}.Append(destName, destName)
mustGetExpectedContainerItems(
t,
driveID,
destName,
locRef)
}
bo, bod := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
@@ -315,24 +362,24 @@ func runDriveIncrementalTest(
{
name: "create a new file",
updateFiles: func(t *testing.T, ctx context.Context) {
targetContainer := containerIDs[container1]
targetContainer := containerInfos[container1]
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
driveItem.SetFile(models.NewFile())
newFile, err = ac.PostItemInContainer(
ctx,
driveID,
targetContainer,
targetContainer.id,
driveItem,
control.Copy)
require.NoErrorf(t, err, "creating new file %v", clues.ToCore(err))
newFileID = ptr.Val(newFile.GetId())
expectDeets.AddItem(driveID, makeLocRef(container1), newFileID)
expectDeets.AddItem(driveID, targetContainer.locRef.String(), newFileID)
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsWritten: 4, // .data and .meta for newitem, .dirmeta for parent and ancestor
nonMetaItemsWritten: 1, // .data file for newitem
},
{
@@ -350,7 +397,7 @@ func runDriveIncrementalTest(
// no expectedDeets: metadata isn't tracked
},
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
itemsWritten: 3, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, // the file for which permission was updated
},
{
@@ -368,13 +415,13 @@ func runDriveIncrementalTest(
// no expectedDeets: metadata isn't tracked
},
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
itemsWritten: 3, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, //.data file for newitem
},
{
name: "add permission to container",
updateFiles: func(t *testing.T, ctx context.Context) {
targetContainer := containerIDs[container1]
targetContainer := containerInfos[container1].id
err = onedrive.UpdatePermissions(
ctx,
rh,
@@ -387,13 +434,13 @@ func runDriveIncrementalTest(
// no expectedDeets: metadata isn't tracked
},
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
itemsWritten: 2, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated as update on container
},
{
name: "remove permission from container",
updateFiles: func(t *testing.T, ctx context.Context) {
targetContainer := containerIDs[container1]
targetContainer := containerInfos[container1].id
err = onedrive.UpdatePermissions(
ctx,
rh,
@@ -406,7 +453,7 @@ func runDriveIncrementalTest(
// no expectedDeets: metadata isn't tracked
},
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
itemsWritten: 2, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated
},
{
@@ -421,20 +468,15 @@ func runDriveIncrementalTest(
// no expectedDeets: neither file id nor location changed
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsWritten: 4, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
},
{
name: "rename a file",
updateFiles: func(t *testing.T, ctx context.Context) {
container := containerIDs[container1]
driveItem := models.NewDriveItem()
name := "renamed_new_file.txt"
driveItem.SetName(&name)
parentRef := models.NewItemReference()
parentRef.SetId(&container)
driveItem.SetParentReference(parentRef)
err := ac.PatchItem(
ctx,
@@ -444,19 +486,18 @@ func runDriveIncrementalTest(
require.NoError(t, err, "renaming file %v", clues.ToCore(err))
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsWritten: 4, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
// no expectedDeets: neither file id nor location changed
},
{
name: "move a file between folders",
updateFiles: func(t *testing.T, ctx context.Context) {
dest := containerIDs[container2]
dest := containerInfos[container2]
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
parentRef := models.NewItemReference()
parentRef.SetId(&dest)
parentRef.SetId(&dest.id)
driveItem.SetParentReference(parentRef)
err := ac.PatchItem(
@@ -468,12 +509,12 @@ func runDriveIncrementalTest(
expectDeets.MoveItem(
driveID,
makeLocRef(container1),
makeLocRef(container2),
containerInfos[container1].locRef.String(),
dest.locRef.String(),
ptr.Val(newFile.GetId()))
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsWritten: 4, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for new item
},
{
@@ -485,7 +526,10 @@ func runDriveIncrementalTest(
ptr.Val(newFile.GetId()))
require.NoErrorf(t, err, "deleting file %v", clues.ToCore(err))
expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId()))
expectDeets.RemoveItem(
driveID,
containerInfos[container2].locRef.String(),
ptr.Val(newFile.GetId()))
},
itemsRead: 0,
itemsWritten: 0,
@@ -494,26 +538,34 @@ func runDriveIncrementalTest(
{
name: "move a folder to a subfolder",
updateFiles: func(t *testing.T, ctx context.Context) {
parent := containerIDs[container1]
child := containerIDs[container2]
parent := containerInfos[container1]
child := containerInfos[container2]
driveItem := models.NewDriveItem()
driveItem.SetName(&container2)
parentRef := models.NewItemReference()
parentRef.SetId(&parent)
parentRef.SetId(&parent.id)
driveItem.SetParentReference(parentRef)
err := ac.PatchItem(
ctx,
driveID,
child,
child.id,
driveItem)
require.NoError(t, err, "moving folder", clues.ToCore(err))
expectDeets.MoveLocation(
driveID,
makeLocRef(container2),
makeLocRef(container1))
child.locRef.String(),
parent.locRef.String())
// Remove parent of moved folder since it's now empty.
expectDeets.RemoveLocation(driveID, child.locRef.Dir().String())
// Update in-memory cache with new location.
child.locRef = path.Builder{}.Append(append(
parent.locRef.Elements(),
child.locRef.LastElem())...)
containerInfos[container2] = child
},
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
@@ -522,28 +574,29 @@ func runDriveIncrementalTest(
{
name: "rename a folder",
updateFiles: func(t *testing.T, ctx context.Context) {
parent := containerIDs[container1]
child := containerIDs[container2]
child := containerInfos[container2]
driveItem := models.NewDriveItem()
driveItem.SetName(&containerRename)
parentRef := models.NewItemReference()
parentRef.SetId(&parent)
driveItem.SetParentReference(parentRef)
err := ac.PatchItem(
ctx,
driveID,
child,
child.id,
driveItem)
require.NoError(t, err, "renaming folder", clues.ToCore(err))
containerIDs[containerRename] = containerIDs[container2]
containerInfos[containerRename] = containerInfo{
id: child.id,
locRef: child.locRef.Dir().Append(containerRename),
}
expectDeets.RenameLocation(
driveID,
makeLocRef(container1, container2),
makeLocRef(container1, containerRename))
child.locRef.String(),
containerInfos[containerRename].locRef.String())
delete(containerInfos, container2)
},
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
@@ -552,14 +605,16 @@ func runDriveIncrementalTest(
{
name: "delete a folder",
updateFiles: func(t *testing.T, ctx context.Context) {
container := containerIDs[containerRename]
container := containerInfos[containerRename]
err := ac.DeleteItem(
ctx,
driveID,
container)
container.id)
require.NoError(t, err, "deleting folder", clues.ToCore(err))
expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename))
expectDeets.RemoveLocation(driveID, container.locRef.String())
delete(containerInfos, containerRename)
},
itemsRead: 0,
itemsWritten: 0,
@@ -580,18 +635,12 @@ func runDriveIncrementalTest(
0,
fileDBF)
// Validate creation
itemURL := fmt.Sprintf(
"https://graph.microsoft.com/v1.0/drives/%s/root:/%s",
locRef := path.Builder{}.Append(container3, container3)
mustGetExpectedContainerItems(
t,
driveID,
container3)
resp, err := drives.NewItemItemsDriveItemItemRequestBuilder(itemURL, ctrl.AC.Stable.Adapter()).
Get(ctx, nil)
require.NoError(t, err, "getting drive folder ID", "folder name", container3, clues.ToCore(err))
containerIDs[container3] = ptr.Val(resp.GetId())
expectDeets.AddLocation(driveID, container3)
container3,
locRef)
},
itemsRead: 2, // 2 .data for 2 files
itemsWritten: 6, // read items + 2 directory meta

File 3 of 3: backup-details test helpers (`InDeets`, `CheckBackupDetails`)

@@ -172,11 +172,12 @@ func NewInDeets(repoRefPrefix string) *InDeets {
func (id *InDeets) getSet(set string) *locSet {
s, ok := id.Sets[set]
if ok {
return s
if !ok {
id.Sets[set] = newLocSet()
s = id.Sets[set]
}
return newLocSet()
return s
}
func (id *InDeets) AddAll(deets details.Details, ws whatSet) {
@@ -193,8 +194,13 @@ func (id *InDeets) AddAll(deets details.Details, ws whatSet) {
dir := ent.LocationRef
if ent.Folder != nil {
dir = dir + ent.Folder.DisplayName
id.AddLocation(set, dir)
pb, err := path.Builder{}.SplitUnescapeAppend(dir)
if err != nil {
id.AddLocation(set, err.Error())
}
pb = pb.Append(ent.Folder.DisplayName)
id.AddLocation(set, pb.String())
} else {
id.AddItem(set, ent.LocationRef, ent.ItemRef)
}
@@ -310,6 +316,11 @@ func CheckBackupDetails(
expect.RRPrefix, ent.RepoRef)
}
// Basic check to try to ensure we're checking things when we should be.
if len(deets.Entries) > 0 {
require.NotEmpty(t, expect.Sets, "expected non-empty sets to compare")
}
for set := range expect.Sets {
check := assert.Subsetf
@@ -317,15 +328,38 @@ func CheckBackupDetails(
check = assert.ElementsMatchf
}
// Compare folders.
check(
t,
maps.Keys(result.Sets[set].Locations),
maps.Keys(expect.Sets[set].Locations),
maps.Keys(result.Sets[set].Locations),
"results in %s missing expected location", set)
// Compare items in folders.
for lr, items := range expect.Sets[set].Locations {
check(
t,
maps.Keys(items),
maps.Keys(result.Sets[set].Locations[lr]),
"results in set %s location %s missing expected item",
set,
lr)
}
// Ensure deleted items aren't present.
for lr, items := range expect.Sets[set].Deleted {
// Only report an error for an undeleted container if it's not also
// expected in Locations. We could have the container in both sets if one
// of several items was deleted.
_, lok := expect.Sets[set].Locations[lr]
_, ok := result.Sets[set].Locations[lr]
assert.Falsef(t, ok, "deleted location in %s found in result: %s", set, lr)
assert.Falsef(
t,
!lok && ok,
"deleted location in %s found in result (expected normally %v): %s",
set,
lok,
lr)
for ir := range items {
_, ok := result.Sets[set].Locations[lr][ir]