fix up test fullPath generation (#4833)

Fixes up the generation of paths in drive testing so that paths are created correctly and consistently, even for names that contain slashes and therefore require escaping.
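For illustration, a minimal sketch of the escaping behavior the tests now rely on (the import path and the exact escaped rendering are assumptions based on how `path.Builder` is used in this diff):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/path"
)

func main() {
	// Append treats each argument as a single path element and escapes it,
	// so a folder literally named "pa/rent" survives as one element instead
	// of being split in two by a naive string join.
	pb := (&path.Builder{}).Append("tenant", "pa/rent", "child")
	fmt.Println(pb.String()) // e.g. tenant/pa\/rent/child (slash escaped)
}
```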

---

#### Does this PR need a docs update or release note?

- [x]  No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Issue(s)

* #4689

#### Test Plan

- [x]  Unit test
Keepers 2023-12-15 14:44:26 -07:00 committed by GitHub
parent e3363aaa46
commit 44078e1db2
10 changed files with 1667 additions and 1012 deletions

File diff suppressed because it is too large.


@ -221,6 +221,7 @@ func (c *Collections) makeDriveCollections(
ctx,
tree,
drv,
prevPaths,
prevDeltaLink,
countPagesInDelta,
errs)
@ -457,6 +458,14 @@ func (c *Collections) enumeratePageOfItems(
return err
}
// special case: we only want to add a limited number of files
// to each collection. But if one collection fills up, we don't
// want to break out of the whole backup. That allows us to preview
// many folders with a small selection of files in each.
if errors.Is(err, errHitCollectionLimit) {
continue
}
el.AddRecoverable(ictx, clues.Wrap(err, "adding folder"))
}
}
@ -479,8 +488,6 @@ func (c *Collections) addFolderToTree(
isDeleted = folder.GetDeleted() != nil
isMalware = folder.GetMalware() != nil
isPkg = folder.GetPackageEscaped() != nil
parent = folder.GetParentReference()
parentID string
notSelected bool
)
@ -489,10 +496,6 @@ func (c *Collections) addFolderToTree(
return nil, errHitLimit
}
if parent != nil {
parentID = ptr.Val(parent.GetId())
}
defer func() {
switch {
case notSelected:
@ -525,7 +528,7 @@ func (c *Collections) addFolderToTree(
}
if isDeleted {
err := tree.setTombstone(ctx, folderID)
err := tree.setTombstone(ctx, folder)
return nil, clues.Stack(err).OrNil()
}
@ -541,7 +544,7 @@ func (c *Collections) addFolderToTree(
return nil, nil
}
err = tree.setFolder(ctx, parentID, folderID, folderName, isPkg)
err = tree.setFolder(ctx, folder)
return nil, clues.Stack(err).OrNil()
}
@ -635,22 +638,32 @@ func (c *Collections) addFileToTree(
if parentNotNil && !alreadySeen {
countSize := tree.countLiveFilesAndSizes()
// Tell the enumerator to exit if we've already hit the total
// limit of bytes or items in this backup.
if limiter.alreadyHitTotalBytesLimit(countSize.totalBytes) ||
limiter.hitItemLimit(countSize.numFiles) {
return nil, errHitLimit
}
// Don't add new items if the new collection has already reached its limit.
// Item moves and updates are generally allowed through.
if limiter.atContainerItemsLimit(len(parentNode.files)) || limiter.hitItemLimit(countSize.numFiles) {
return nil, errHitLimit
if limiter.atContainerItemsLimit(len(parentNode.files)) {
return nil, errHitCollectionLimit
}
// Skip large files that don't fit within the size limit.
// unlike the other checks, which see if we're already at the limit, this check
// needs to be forward-facing to ensure we don't go far over the limit.
// Don't include large files that don't fit within the size limit.
// Unlike the other checks, which see if we're already at the limit,
// this check needs to be forward-facing to ensure we don't go far
// over the limit.
// Example case: a 1gb limit and a 25gb file.
if limiter.hitTotalBytesLimit(fileSize + countSize.totalBytes) {
return nil, errHitLimit
if limiter.willStepOverBytesLimit(countSize.totalBytes, fileSize) {
// don't return errHitLimit here; we only want to skip the
// current file. We may not want to skip files after it.
return nil, nil
}
}
err := tree.addFile(parentID, fileID, file)
err := tree.addFile(file)
if err != nil {
return nil, clues.StackWC(ctx, err)
}
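To summarize the branch order above, here is a standalone toy model (hypothetical types and numbers; the real checks live on `pagerLimiter` and the tree): total limits end the whole enumeration, the per-container limit only closes the current collection, and an oversized file is skipped silently.

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errHitLimit           = errors.New("hit limiter limits")
	errHitCollectionLimit = errors.New("hit item limits within the current collection")
)

type limits struct {
	maxItems, maxPerContainer int
	maxBytes                  int64
}

// admit mirrors the decision order above.
func admit(l limits, filesInParent, numFiles int, totalBytes, fileSize int64) (bool, error) {
	if totalBytes > l.maxBytes || numFiles >= l.maxItems {
		return false, errHitLimit // stop enumerating entirely
	}
	if filesInParent >= l.maxPerContainer {
		return false, errHitCollectionLimit // only this collection is full
	}
	if totalBytes+fileSize > int64(float64(l.maxBytes)*1.03) {
		return false, nil // skip just this file; later files may still fit
	}
	return true, nil
}

func main() {
	l := limits{maxItems: 10, maxPerContainer: 2, maxBytes: 1 << 30}
	fmt.Println(admit(l, 0, 0, 0, 25<<30)) // false <nil>: 25gb file on a 1gb limit
	fmt.Println(admit(l, 2, 3, 100, 10))   // false + errHitCollectionLimit
}
```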
@ -776,6 +789,7 @@ func (c *Collections) turnTreeIntoCollections(
ctx context.Context,
tree *folderyMcFolderFace,
drv models.Driveable,
prevPaths map[string]string,
prevDeltaLink string,
countPagesInDelta int,
errs *fault.Bus,
@ -793,7 +807,6 @@ func (c *Collections) turnTreeIntoCollections(
var (
collections = []data.BackupCollection{}
newPrevPaths = map[string]string{}
uc *urlCache
el = errs.Local()
driveID = ptr.Val(drv.GetId())
@ -825,15 +838,11 @@ func (c *Collections) turnTreeIntoCollections(
}
}
for id, cbl := range collectables {
for _, cbl := range collectables {
if el.Failure() != nil {
break
}
if cbl.currPath != nil {
newPrevPaths[id] = cbl.currPath.String()
}
coll, err := NewCollection(
c.handler,
c.protectedResource,
@ -856,5 +865,16 @@ func (c *Collections) turnTreeIntoCollections(
collections = append(collections, coll)
}
return collections, newPrevPaths, tree.generateExcludeItemIDs(), el.Failure()
if el.Failure() != nil {
return nil, nil, nil, el.Failure()
}
// use the collectables and old previous paths
// to generate new previous paths
newPrevPaths, err := tree.generateNewPreviousPaths(collectables, prevPaths)
if err != nil {
return nil, nil, nil, clues.WrapWC(ctx, err, "generating new previous paths")
}
return collections, newPrevPaths, tree.generateExcludeItemIDs(), nil
}


@ -237,6 +237,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_GetTree() {
// to ensure we stitch the parts together correctly.
func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
d := drive()
t := suite.T()
table := []struct {
name string
@ -265,7 +266,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
delta(id(deltaURL), nil).with(
aPage()))),
prevPaths: map[string]string{
folderID(): d.strPath(folderName()),
folderID(): d.strPath(t, folderName()),
},
expectCounts: countTD.Expected{
count.PrevPaths: 1,
@ -277,7 +278,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
enumerator: driveEnumerator(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(d.folderAtRoot(), d.fileAt(folder))))),
aPage(
d.folderAt(root),
d.fileAt(folder))))),
prevPaths: map[string]string{},
expectCounts: countTD.Expected{
count.PrevPaths: 0,
@ -289,9 +292,11 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
enumerator: driveEnumerator(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(d.folderAtRoot(), d.fileAt(folder))))),
aPage(
d.folderAt(root),
d.fileAt(folder))))),
prevPaths: map[string]string{
folderID(): d.strPath(folderName()),
folderID(): d.strPath(t, folderName()),
},
expectCounts: countTD.Expected{
count.PrevPaths: 1,
@ -319,7 +324,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
aReset(),
aPage()))),
prevPaths: map[string]string{
folderID(): d.strPath(folderName()),
folderID(): d.strPath(t, folderName()),
},
expectCounts: countTD.Expected{
count.PrevPaths: 1,
@ -332,7 +337,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
d.newEnumer().with(
deltaWReset(id(deltaURL), nil).with(
aReset(),
aPage(d.folderAtRoot(), d.fileAt(folder))))),
aPage(
d.folderAt(root),
d.fileAt(folder))))),
prevPaths: map[string]string{},
expectCounts: countTD.Expected{
count.PrevPaths: 0,
@ -345,9 +352,11 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
d.newEnumer().with(
deltaWReset(id(deltaURL), nil).with(
aReset(),
aPage(d.folderAtRoot(), d.fileAt(folder))))),
aPage(
d.folderAt(root),
d.fileAt(folder))))),
prevPaths: map[string]string{
folderID(): d.strPath(folderName()),
folderID(): d.strPath(t, folderName()),
},
expectCounts: countTD.Expected{
count.PrevPaths: 1,
@ -384,6 +393,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors() {
d := drive()
t := suite.T()
table := []struct {
name string
@ -395,8 +405,8 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
name: "no error - normal usage",
tree: treeWithFolders,
prevPaths: map[string]string{
folderID("parent"): d.strPath(folderName("parent")),
folderID(): d.strPath(folderName("parent"), folderName()),
folderID("parent"): d.strPath(t, folderName("parent")),
folderID(): d.strPath(t, folderName("parent"), folderName()),
},
expectErr: require.NoError,
},
@ -410,7 +420,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
name: "no error - folder not visited in this delta",
tree: treeWithFolders,
prevPaths: map[string]string{
id("santa"): d.strPath(name("santa")),
id("santa"): d.strPath(t, name("santa")),
},
expectErr: require.NoError,
},
@ -418,7 +428,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
name: "empty key in previous paths",
tree: treeWithFolders,
prevPaths: map[string]string{
"": d.strPath(folderName("parent")),
"": d.strPath(t, folderName("parent")),
},
expectErr: require.Error,
},
@ -460,6 +470,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections() {
d := drive()
t := suite.T()
type expected struct {
prevPaths map[string]string
@ -481,9 +492,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
enableURLCache: true,
expect: expected{
prevPaths: map[string]string{
rootID: d.strPath(),
folderID("parent"): d.strPath(folderName("parent")),
folderID(): d.strPath(folderName("parent"), folderName()),
rootID: d.strPath(t),
folderID("parent"): d.strPath(t, folderName("parent")),
folderID(): d.strPath(t, folderName("parent"), folderName()),
},
collections: func(t *testing.T, d *deltaDrive) expectedCollections {
return expectCollections(
@ -500,13 +511,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
aColl(
d.fullPath(t, folderName("parent"), folderName()),
nil,
fileID()))
fileID("f")))
},
globalExcludedFileIDs: makeExcludeMap(
fileID("r"),
fileID("p"),
fileID("d"),
fileID()),
fileID("f")),
},
},
{
@ -514,16 +525,20 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
tree: fullTree,
enableURLCache: true,
prevPaths: map[string]string{
rootID: d.strPath(),
folderID("parent"): d.strPath(folderName("parent-prev")),
folderID(): d.strPath(folderName("parent-prev"), folderName()),
folderID("tombstone"): d.strPath(folderName("tombstone-prev")),
rootID: d.strPath(t),
folderID("parent"): d.strPath(t, folderName("parent-prev")),
folderID(): d.strPath(t, folderName("parent-prev"), folderName()),
folderID("prev"): d.strPath(t, folderName("parent-prev"), folderName("prev")),
folderID("prev-chld"): d.strPath(t, folderName("parent-prev"), folderName("prev"), folderName("prev-chld")),
folderID("tombstone"): d.strPath(t, folderName("tombstone-prev")),
},
expect: expected{
prevPaths: map[string]string{
rootID: d.strPath(),
folderID("parent"): d.strPath(folderName("parent")),
folderID(): d.strPath(folderName("parent"), folderName()),
rootID: d.strPath(t),
folderID("parent"): d.strPath(t, folderName("parent")),
folderID(): d.strPath(t, folderName("parent"), folderName()),
folderID("prev"): d.strPath(t, folderName("parent"), folderName("prev")),
folderID("prev-chld"): d.strPath(t, folderName("parent"), folderName("prev"), folderName("prev-chld")),
},
collections: func(t *testing.T, d *deltaDrive) expectedCollections {
return expectCollections(
@ -540,31 +555,35 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
aColl(
d.fullPath(t, folderName("parent"), folderName()),
d.fullPath(t, folderName("parent-prev"), folderName()),
fileID()),
fileID("f")),
aColl(nil, d.fullPath(t, folderName("tombstone-prev"))))
},
globalExcludedFileIDs: makeExcludeMap(
fileID("r"),
fileID("p"),
fileID("d"),
fileID()),
fileID("f")),
},
},
{
name: "all folders moved - todo: path separator string check",
tree: fullTreeWithNames("parent", "tombstone"),
name: "all folders moved - path separator string check",
tree: fullTreeWithNames("pa/rent", "to/mbstone"),
enableURLCache: true,
prevPaths: map[string]string{
rootID: d.strPath(),
folderID("parent"): d.strPath(folderName("parent-prev")),
folderID(): d.strPath(folderName("parent-prev"), folderName()),
folderID("tombstone"): d.strPath(folderName("tombstone-prev")),
rootID: d.strPath(t),
folderID("pa/rent"): d.strPath(t, folderName("parent/prev")),
folderID(): d.strPath(t, folderName("parent/prev"), folderName()),
folderID("pr/ev"): d.strPath(t, folderName("parent/prev"), folderName("pr/ev")),
folderID("prev/chld"): d.strPath(t, folderName("parent/prev"), folderName("pr/ev"), folderName("prev/chld")),
folderID("to/mbstone"): d.strPath(t, folderName("tombstone/prev")),
},
expect: expected{
prevPaths: map[string]string{
rootID: d.strPath(),
folderID("parent"): d.strPath(folderName("parent")),
folderID(): d.strPath(folderName("parent"), folderName()),
rootID: d.strPath(t),
folderID("pa/rent"): d.strPath(t, folderName("pa/rent")),
folderID(): d.strPath(t, folderName("pa/rent"), folderName()),
folderID("pr/ev"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev")),
folderID("prev/chld"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev"), folderName("prev/chld")),
},
collections: func(t *testing.T, d *deltaDrive) expectedCollections {
return expectCollections(
@ -575,37 +594,45 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
d.fullPath(t),
fileID("r")),
aColl(
d.fullPath(t, folderName("parent")),
d.fullPath(t, folderName("parent-prev")),
d.fullPath(t, folderName("pa/rent")),
d.fullPath(t, folderName("parent/prev")),
fileID("p")),
aColl(
d.fullPath(t, folderName("parent"), folderName()),
d.fullPath(t, folderName("parent-prev"), folderName()),
fileID()),
aColl(nil, d.fullPath(t, folderName("tombstone-prev"))))
d.fullPath(t, folderName("pa/rent"), folderName()),
d.fullPath(t, folderName("parent/prev"), folderName()),
fileID("f")),
aColl(nil, d.fullPath(t, folderName("tombstone/prev"))))
},
globalExcludedFileIDs: makeExcludeMap(
fileID("r"),
fileID("p"),
fileID("d"),
fileID()),
fileID("f")),
},
},
{
name: "no folders moved",
name: "nothing in the tree was moved " +
"but there were some folders in the previous paths that " +
"didn't appear in the delta so those have to appear in the " +
"new previous paths but those weren't moved either so " +
"everything should have the same path at the end",
tree: fullTree,
enableURLCache: true,
prevPaths: map[string]string{
rootID: d.strPath(),
folderID("parent"): d.strPath(folderName("parent")),
folderID(): d.strPath(folderName("parent"), folderName()),
folderID("tombstone"): d.strPath(folderName("tombstone")),
rootID: d.strPath(t),
folderID("parent"): d.strPath(t, folderName("parent")),
folderID(): d.strPath(t, folderName("parent"), folderName()),
folderID("tombstone"): d.strPath(t, folderName("tombstone")),
folderID("prev"): d.strPath(t, folderName("prev")),
folderID("prev-chld"): d.strPath(t, folderName("prev"), folderName("prev-chld")),
},
expect: expected{
prevPaths: map[string]string{
rootID: d.strPath(),
folderID("parent"): d.strPath(folderName("parent")),
folderID(): d.strPath(folderName("parent"), folderName()),
rootID: d.strPath(t),
folderID("parent"): d.strPath(t, folderName("parent")),
folderID(): d.strPath(t, folderName("parent"), folderName()),
folderID("prev"): d.strPath(t, folderName("prev")),
folderID("prev-chld"): d.strPath(t, folderName("prev"), folderName("prev-chld")),
},
collections: func(t *testing.T, d *deltaDrive) expectedCollections {
return expectCollections(
@ -622,14 +649,64 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
aColl(
d.fullPath(t, folderName("parent"), folderName()),
d.fullPath(t, folderName("parent"), folderName()),
fileID()),
fileID("f")),
aColl(nil, d.fullPath(t, folderName("tombstone"))))
},
globalExcludedFileIDs: makeExcludeMap(
fileID("r"),
fileID("p"),
fileID("d"),
fileID()),
fileID("f")),
},
},
{
name: "nothing in the tree was moved " +
"but there were some folders in the previous paths that " +
"didn't appear in the delta so those have to appear in the " +
"new previous paths but those weren't moved either so " +
"everything should have the same path at the end " +
"- the version with path separators chars in the directory names",
tree: fullTreeWithNames("pa/rent", "to/mbstone"),
enableURLCache: true,
prevPaths: map[string]string{
rootID: d.strPath(t),
folderID("pa/rent"): d.strPath(t, folderName("pa/rent")),
folderID(): d.strPath(t, folderName("pa/rent"), folderName()),
folderID("pr/ev"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev")),
folderID("prev/chld"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev"), folderName("prev/chld")),
folderID("to/mbstone"): d.strPath(t, folderName("to/mbstone")),
},
expect: expected{
prevPaths: map[string]string{
rootID: d.strPath(t),
folderID("pa/rent"): d.strPath(t, folderName("pa/rent")),
folderID(): d.strPath(t, folderName("pa/rent"), folderName()),
folderID("pr/ev"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev")),
folderID("prev/chld"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev"), folderName("prev/chld")),
},
collections: func(t *testing.T, d *deltaDrive) expectedCollections {
return expectCollections(
false,
true,
aColl(
d.fullPath(t),
d.fullPath(t),
fileID("r")),
aColl(
d.fullPath(t, folderName("pa/rent")),
d.fullPath(t, folderName("pa/rent")),
fileID("p")),
aColl(
d.fullPath(t, folderName("pa/rent"), folderName()),
d.fullPath(t, folderName("pa/rent"), folderName()),
fileID("f")),
aColl(nil, d.fullPath(t, folderName("to/mbstone"))))
},
globalExcludedFileIDs: makeExcludeMap(
fileID("r"),
fileID("p"),
fileID("d"),
fileID("f")),
},
},
}
@ -656,6 +733,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
ctx,
tree,
d.able,
test.prevPaths,
deltaURL,
countPages,
fault.New(true))
@ -782,10 +860,10 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
enumerator: driveEnumerator(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(d.folderAtRoot()),
aPage(d.folderAtRoot("sib")),
aPage(d.folderAt(root)),
aPage(d.folderAt(root, "sib")),
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.folderAt(folder, "chld"))))),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: populateTreeExpected{
@ -815,13 +893,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder)),
aPage(
d.folderAtRoot("sib"),
d.folderAt(root, "sib"),
d.fileAt("sib", "fsib")),
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.folderAt(folder, "chld"),
d.fileAt("chld", "fchld"))))),
limiter: newPagerLimiter(control.DefaultOptions()),
@ -917,7 +995,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder)),
aPage(delItem(folderID(), rootID, isFolder))))),
limiter: newPagerLimiter(control.DefaultOptions()),
@ -950,7 +1028,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d.folderAtRoot("parent"),
d.folderAt(root, "parent"),
driveItem(folderID(), folderName("moved"), d.dir(), folderID("parent"), isFolder),
driveFile(d.dir(folderName("parent"), folderName()), folderID())),
aPage(delItem(folderID(), folderID("parent"), isFolder))))),
@ -986,7 +1064,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
delta(id(deltaURL), nil).with(
aPage(delItem(folderID(), rootID, isFolder)),
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder))))),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: populateTreeExpected{
@ -1018,7 +1096,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
delta(id(deltaURL), nil).with(
aPage(delItem(folderID(), rootID, isFolder)),
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder))))),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: populateTreeExpected{
@ -1049,13 +1127,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder)),
aPage(
d.folderAtRoot("sib"),
d.folderAt(root, "sib"),
d.fileAt("sib", "fsib")),
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.folderAt(folder, "chld"),
d.fileAt("chld", "fchld"))))),
limiter: newPagerLimiter(minimumLimitOpts()),
@ -1085,13 +1163,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder)),
aPage(
d.folderAtRoot("sib"),
d.folderAt(root, "sib"),
d.fileAt("sib", "fsib")),
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.folderAt(folder, "chld"),
d.fileAt("chld", "fchld"))))),
limiter: newPagerLimiter(minimumLimitOpts()),
@ -1136,15 +1214,15 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
d.newEnumer().with(
delta(id(deltaURL), nil).
with(aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder))),
delta(id(deltaURL), nil).
with(aPage(
d.folderAtRoot("sib"),
d.folderAt(root, "sib"),
d.fileAt("sib", "fsib"))),
delta(id(deltaURL), nil).
with(aPage(
d.folderAtRoot(),
d.folderAt(root),
d.folderAt(folder, "chld"),
d.fileAt("chld", "fchld"))))),
limiter: newPagerLimiter(control.DefaultOptions()),
@ -1182,7 +1260,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder))),
// a (delete,create) pair in the same delta can occur when
// a user deletes and restores an item in-between deltas.
@ -1191,7 +1269,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
delItem(folderID(), rootID, isFolder),
delItem(fileID(), folderID(), isFile)),
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder))))),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: populateTreeExpected{
@ -1222,7 +1300,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
d.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder))),
delta(id(deltaURL), nil).with(
aPage(
@ -1260,7 +1338,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
delta(id(deltaURL), nil).with(
// first page: create /root/folder and /root/folder/file
aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder)),
// assume the user makes changes at this point:
// * create a new /root/folder
@ -1442,9 +1520,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
name: "many folders in a hierarchy",
tree: treeWithRoot,
page: aPage(
d.folderAtRoot(),
d.folderAtRoot("sib"),
d.folderAt(folder, "chld")),
d.folderAt(root),
d.folderAt(folder, "chld"),
d.folderAt(root, "sib")),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: expected{
counts: countTD.Expected{
@ -1465,7 +1543,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
name: "create->delete",
tree: treeWithRoot,
page: aPage(
d.folderAtRoot(),
d.folderAt(root),
delItem(folderID(), rootID, isFolder)),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: expected{
@ -1485,7 +1563,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
name: "move->delete",
tree: treeWithFolders,
page: aPage(
d.folderAtRoot("parent"),
d.folderAt(root, "parent"),
driveItem(folderID(), folderName("moved"), d.dir(folderName("parent")), folderID("parent"), isFolder),
delItem(folderID(), folderID("parent"), isFolder)),
limiter: newPagerLimiter(control.DefaultOptions()),
@ -1510,7 +1588,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
tree: treeWithRoot,
page: aPage(
delItem(folderID(), rootID, isFolder),
d.folderAtRoot()),
d.folderAt(root)),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: expected{
counts: countTD.Expected{
@ -1531,7 +1609,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
tree: treeWithRoot,
page: aPage(
delItem(folderID(), rootID, isFolder),
d.folderAtRoot()),
d.folderAt(root)),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: expected{
counts: countTD.Expected{
@ -1596,7 +1674,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
func (suite *CollectionsTreeUnitSuite) TestCollections_AddFolderToTree() {
var (
d = drive()
fld = custom.ToCustomDriveItem(d.folderAtRoot())
fld = custom.ToCustomDriveItem(d.folderAt(root))
subFld = custom.ToCustomDriveItem(driveFolder(d.dir(folderName("parent")), folderID("parent")))
pack = custom.ToCustomDriveItem(driveItem(id(pkg), name(pkg), d.dir(), rootID, isPackage))
del = custom.ToCustomDriveItem(delItem(folderID(), rootID, isFolder))
@ -1871,13 +1949,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeFolderCollectionPath(
}{
{
name: "root",
folder: driveRootFolder(),
folder: rootFolder(),
expect: basePath.String(),
expectErr: require.NoError,
},
{
name: "folder",
folder: d.folderAtRoot(),
folder: d.folderAt(root),
expect: folderPath.String(),
expectErr: require.NoError,
},
@ -1935,7 +2013,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
{
name: "one file at root",
tree: treeWithRoot,
page: aPage(d.fileAtRoot()),
page: aPage(d.fileAt(root)),
expect: expected{
counts: countTD.Expected{
count.TotalDeleteFilesProcessed: 0,
@ -1954,8 +2032,8 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
name: "many files in a hierarchy",
tree: treeWithRoot,
page: aPage(
d.fileAtRoot(),
d.folderAtRoot(),
d.fileAt(root),
d.folderAt(root),
d.fileAt(folder, "fchld")),
expect: expected{
counts: countTD.Expected{
@ -1976,7 +2054,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
name: "many updates to the same file",
tree: treeWithRoot,
page: aPage(
d.fileAtRoot(),
d.fileAt(root),
driveItem(fileID(), fileName(1), d.dir(), rootID, isFile),
driveItem(fileID(), fileName(2), d.dir(), rootID, isFile)),
expect: expected{
@ -2031,7 +2109,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
name: "create->delete",
tree: treeWithRoot,
page: aPage(
d.fileAtRoot(),
d.fileAt(root),
delItem(fileID(), rootID, isFile)),
expect: expected{
counts: countTD.Expected{
@ -2049,7 +2127,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
name: "move->delete",
tree: treeWithFileAtRoot,
page: aPage(
d.folderAtRoot(),
d.folderAt(root),
d.fileAt(folder),
delItem(fileID(), folderID(), isFile)),
expect: expected{
@ -2069,7 +2147,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
tree: treeWithFileAtRoot,
page: aPage(
delItem(fileID(), rootID, isFile),
d.fileAtRoot()),
d.fileAt(root)),
expect: expected{
counts: countTD.Expected{
count.TotalDeleteFilesProcessed: 1,
@ -2089,7 +2167,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
tree: treeWithRoot,
page: aPage(
delItem(fileID(), rootID, isFile),
d.fileAtRoot()),
d.fileAt(root)),
expect: expected{
counts: countTD.Expected{
count.TotalDeleteFilesProcessed: 1,
@ -2140,10 +2218,18 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
d := drive()
unlimitedItemsPerContainer := newPagerLimiter(minimumLimitOpts())
unlimitedItemsPerContainer.limits.MaxItemsPerContainer = 9001
unlimitedTotalBytesAndFiles := newPagerLimiter(minimumLimitOpts())
unlimitedTotalBytesAndFiles.limits.MaxBytes = 9001
unlimitedTotalBytesAndFiles.limits.MaxItems = 9001
type expected struct {
counts countTD.Expected
err require.ErrorAssertionFunc
shouldHitLimit bool
shouldHitCollLimit bool
skipped assert.ValueAssertionFunc
treeContainsFileIDsWithParent map[string]string
countLiveFiles int
@ -2160,7 +2246,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
{
name: "add new file",
tree: treeWithRoot,
file: d.fileAtRoot(),
file: d.fileAt(root),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: expected{
counts: countTD.Expected{
@ -2178,7 +2264,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
{
name: "duplicate file",
tree: treeWithFileAtRoot,
file: d.fileAtRoot(),
file: d.fileAt(root),
limiter: newPagerLimiter(control.DefaultOptions()),
expect: expected{
counts: countTD.Expected{
@ -2260,8 +2346,45 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
{
name: "already at container file limit",
tree: treeWithFileAtRoot,
file: d.fileAtRoot(2),
limiter: newPagerLimiter(minimumLimitOpts()),
file: d.fileAt(root, 2),
limiter: unlimitedTotalBytesAndFiles,
expect: expected{
counts: countTD.Expected{
count.TotalFilesProcessed: 1,
},
err: require.Error,
shouldHitCollLimit: true,
skipped: assert.Nil,
treeContainsFileIDsWithParent: map[string]string{
fileID(): rootID,
},
countLiveFiles: 1,
countTotalBytes: defaultFileSize,
},
},
{
name: "goes over total byte limit",
tree: treeWithRoot,
file: d.fileAt(root),
limiter: unlimitedItemsPerContainer,
expect: expected{
counts: countTD.Expected{
count.TotalFilesProcessed: 1,
},
// no error here, since byte limit shouldn't
// make the func return an error.
err: require.NoError,
skipped: assert.Nil,
treeContainsFileIDsWithParent: map[string]string{},
countLiveFiles: 0,
countTotalBytes: 0,
},
},
{
name: "already over total byte limit",
tree: treeWithFileAtRoot,
file: d.fileAt(root, 2),
limiter: unlimitedItemsPerContainer,
expect: expected{
counts: countTD.Expected{
count.TotalFilesProcessed: 1,
@ -2276,23 +2399,6 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
countTotalBytes: defaultFileSize,
},
},
{
name: "goes over total byte limit",
tree: treeWithRoot,
file: d.fileAtRoot(),
limiter: newPagerLimiter(minimumLimitOpts()),
expect: expected{
counts: countTD.Expected{
count.TotalFilesProcessed: 1,
},
err: require.Error,
shouldHitLimit: true,
skipped: assert.Nil,
treeContainsFileIDsWithParent: map[string]string{},
countLiveFiles: 0,
countTotalBytes: 0,
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
@ -2318,6 +2424,10 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
test.expect.err(t, err, clues.ToCore(err))
test.expect.skipped(t, skipped)
if test.expect.shouldHitCollLimit {
require.ErrorIs(t, err, errHitCollectionLimit, clues.ToCore(err))
}
if test.expect.shouldHitLimit {
require.ErrorIs(t, err, errHitLimit, clues.ToCore(err))
}


@ -2,8 +2,11 @@ package drive
import (
"context"
"sort"
"strings"
"github.com/alcionai/clues"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
@ -78,33 +81,29 @@ type nodeyMcNodeFace struct {
// required for mid-enumeration folder moves, else we have to walk
// the tree completely to remove the node from its old parent.
parent *nodeyMcNodeFace
// the microsoft item ID. Mostly because we might as well
// attach that to the node if we're also attaching the dir.
id string
// single directory name, not a path
name string
// folder is the actual drive item for this directory.
// we save this so that, during post-processing, it can
// get moved into the collection files, which will cause
// the collection processor to generate a permissions
// metadata file for the folder.
folder *custom.DriveItem
// contains the complete previous path
prev path.Path
// folderID -> node
children map[string]*nodeyMcNodeFace
// file item ID -> file metadata
files map[string]*custom.DriveItem
// for special handling protocols around packages
isPackage bool
}
func newNodeyMcNodeFace(
parent *nodeyMcNodeFace,
id, name string,
isPackage bool,
folder *custom.DriveItem,
) *nodeyMcNodeFace {
return &nodeyMcNodeFace{
parent: parent,
id: id,
name: name,
folder: folder,
children: map[string]*nodeyMcNodeFace{},
files: map[string]*custom.DriveItem{},
isPackage: isPackage,
}
}
@ -134,9 +133,14 @@ func (face *folderyMcFolderFace) getNode(id string) *nodeyMcNodeFace {
// values are updated to match (isPackage is assumed not to change).
func (face *folderyMcFolderFace) setFolder(
ctx context.Context,
parentID, id, name string,
isPackage bool,
folder *custom.DriveItem,
) error {
var (
id = ptr.Val(folder.GetId())
name = ptr.Val(folder.GetName())
parentFolder = folder.GetParentReference()
)
// need to ensure we have the minimum requirements met for adding a node.
if len(id) == 0 {
return clues.NewWC(ctx, "missing folder ID")
@ -146,16 +150,20 @@ func (face *folderyMcFolderFace) setFolder(
return clues.NewWC(ctx, "missing folder name")
}
if len(parentID) == 0 && id != face.rootID {
if (parentFolder == nil || len(ptr.Val(parentFolder.GetId())) == 0) &&
id != face.rootID {
return clues.NewWC(ctx, "non-root folder missing parent id")
}
// only set the root node once.
if id == face.rootID {
if face.root == nil {
root := newNodeyMcNodeFace(nil, id, name, isPackage)
root := newNodeyMcNodeFace(nil, folder)
face.root = root
face.folderIDToNode[id] = root
} else {
// but update the folder each time, to stay in sync with changes
face.root.folder = folder
}
return nil
@ -167,7 +175,7 @@ func (face *folderyMcFolderFace) setFolder(
// 3. existing folder migrated to new location.
// 4. tombstoned folder restored.
parent, ok := face.folderIDToNode[parentID]
parentNode, ok := face.folderIDToNode[ptr.Val(parentFolder.GetId())]
if !ok {
return clues.NewWC(ctx, "folder added before parent")
}
@ -184,9 +192,9 @@ func (face *folderyMcFolderFace) setFolder(
if zombey, tombstoned := face.tombstones[id]; tombstoned {
delete(face.tombstones, id)
zombey.parent = parent
zombey.name = name
parent.children[id] = zombey
zombey.parent = parentNode
zombey.folder = folder
parentNode.children[id] = zombey
face.folderIDToNode[id] = zombey
return nil
@ -204,21 +212,21 @@ func (face *folderyMcFolderFace) setFolder(
// technically shouldn't be possible but better to keep the problem tracked
// just in case.
logger.Ctx(ctx).Info("non-root folder already exists with no parent ref")
} else if nodey.parent != parent {
} else if nodey.parent != parentNode {
// change type 3. we need to ensure the old parent stops pointing to this node.
delete(nodey.parent.children, id)
}
nodey.name = name
nodey.parent = parent
nodey.parent = parentNode
nodey.folder = folder
} else {
// change type 1: new addition
nodey = newNodeyMcNodeFace(parent, id, name, isPackage)
nodey = newNodeyMcNodeFace(parentNode, folder)
}
// ensure the parent points to this node, and that the node is registered
// in the map of all nodes in the tree.
parent.children[id] = nodey
parentNode.children[id] = nodey
face.folderIDToNode[id] = nodey
return nil
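With the signature change, call sites shrink to passing the item itself; e.g., from treeWithRoot in the test helpers below:

```go
root := custom.ToCustomDriveItem(rootFolder())
err := tree.setFolder(context.Background(), root)
require.NoError(t, err, clues.ToCore(err))
```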
@ -226,8 +234,10 @@ func (face *folderyMcFolderFace) setFolder(
func (face *folderyMcFolderFace) setTombstone(
ctx context.Context,
id string,
folder *custom.DriveItem,
) error {
id := ptr.Val(folder.GetId())
if len(id) == 0 {
return clues.NewWC(ctx, "missing tombstone folder ID")
}
@ -254,7 +264,7 @@ func (face *folderyMcFolderFace) setTombstone(
}
if _, alreadyBuried := face.tombstones[id]; !alreadyBuried {
face.tombstones[id] = newNodeyMcNodeFace(nil, id, "", false)
face.tombstones[id] = newNodeyMcNodeFace(nil, folder)
}
return nil
@ -298,7 +308,7 @@ func (face *folderyMcFolderFace) setPreviousPath(
return nil
}
zombey := newNodeyMcNodeFace(nil, folderID, "", false)
zombey := newNodeyMcNodeFace(nil, custom.NewDriveItem(folderID, ""))
zombey.prev = prev
face.tombstones[folderID] = zombey
@ -318,13 +328,20 @@ func (face *folderyMcFolderFace) hasFile(id string) bool {
// file was already added to the tree and is getting relocated,
// this func will update and/or clean up all the old references.
func (face *folderyMcFolderFace) addFile(
parentID, id string,
file *custom.DriveItem,
) error {
if len(parentID) == 0 {
var (
parentFolder = file.GetParentReference()
id = ptr.Val(file.GetId())
parentID string
)
if parentFolder == nil || len(ptr.Val(parentFolder.GetId())) == 0 {
return clues.New("item added without parent folder ID")
}
parentID = ptr.Val(parentFolder.GetId())
if len(id) == 0 {
return clues.New("item added without ID")
}
@ -419,17 +436,22 @@ func (face *folderyMcFolderFace) walkTreeAndBuildCollections(
return nil
}
isRoot := node == face.root
var (
id = ptr.Val(node.folder.GetId())
name = ptr.Val(node.folder.GetName())
isPackage = node.folder.GetPackageEscaped() != nil
isRoot = node == face.root
)
if !isRoot {
location = location.Append(node.name)
location = location.Append(name)
}
for _, child := range node.children {
err := face.walkTreeAndBuildCollections(
child,
location,
node.isPackage || isChildOfPackage,
isPackage || isChildOfPackage,
result)
if err != nil {
return err
@ -444,19 +466,134 @@ func (face *folderyMcFolderFace) walkTreeAndBuildCollections(
"path_suffix", location.Elements())
}
files := node.files
if !isRoot {
// add the folder itself to the list of files inside the folder.
// that will cause the collection processor to generate a metadata
// file to hold the folder's permissions.
files = maps.Clone(node.files)
files[id] = node.folder
}
cbl := collectable{
currPath: collectionPath,
files: node.files,
folderID: node.id,
isPackageOrChildOfPackage: node.isPackage || isChildOfPackage,
files: files,
folderID: id,
isPackageOrChildOfPackage: isPackage || isChildOfPackage,
prevPath: node.prev,
}
result[node.id] = cbl
result[id] = cbl
return nil
}
type idPrevPathTup struct {
id string
prevPath string
}
// generateNewPreviousPaths fuses the collectables and old prevPaths
// into a new prevPaths map.
func (face *folderyMcFolderFace) generateNewPreviousPaths(
collectables map[string]collectable,
prevPaths map[string]string,
) (map[string]string, error) {
var (
// id -> currentPath
results = map[string]string{}
// prevPath -> currentPath
movedPaths = map[string]string{}
// prevPath -> {}
tombstoned = map[string]struct{}{}
)
// first, move all collectables into the new maps
for id, cbl := range collectables {
if cbl.currPath == nil {
tombstoned[cbl.prevPath.String()] = struct{}{}
continue
}
cp := cbl.currPath.String()
results[id] = cp
if cbl.prevPath != nil && cbl.prevPath.String() != cp {
movedPaths[cbl.prevPath.String()] = cp
}
}
// next, create a slice of tuples representing any
// old prevPath entry whose ID isn't already bound to
// a collectable.
unseenPrevPaths := []idPrevPathTup{}
for id, p := range prevPaths {
// if the current folder was tombstoned, skip it
if _, ok := tombstoned[p]; ok {
continue
}
if _, ok := results[id]; !ok {
unseenPrevPaths = append(unseenPrevPaths, idPrevPathTup{id, p})
}
}
// sort the slice by path, ascending.
// This ensures we work from root to leaf when replacing prefixes,
// and thus we won't need to walk every unseen path from leaf to
// root looking for a matching prefix.
sortByLeastPath := func(i, j int) bool {
return unseenPrevPaths[i].prevPath < unseenPrevPaths[j].prevPath
}
sort.Slice(unseenPrevPaths, sortByLeastPath)
for _, un := range unseenPrevPaths {
elems := path.NewElements(un.prevPath)
pb, err := path.Builder{}.UnescapeAndAppend(elems...)
if err != nil {
return nil, err
}
parent := pb.Dir().String()
// if the parent was tombstoned, add this prevPath entry to the
// tombstoned map; that'll allow the tombstone identification to
// cascade to children, and it won't get added to the results.
if _, ok := tombstoned[parent]; ok {
tombstoned[un.prevPath] = struct{}{}
continue
}
// if the parent wasn't moved, add the same path to the result set
parentCurrentPath, ok := movedPaths[parent]
if !ok {
results[un.id] = un.prevPath
continue
}
// if the parent was moved, replace the prefix and
// add it to the result set
// TODO: should probably use path.UpdateParent for this.
// but I want the quality-of-life of feeding it strings
// instead of parsing strings to paths here first.
newPath := strings.Replace(un.prevPath, parent, parentCurrentPath, 1)
results[un.id] = newPath
// add the current string to the moved list; that'll allow it to cascade to all children.
movedPaths[un.prevPath] = newPath
}
return results, nil
}
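To make the prefix cascade concrete, a standalone toy version (plain strings, no escaping; the real code goes through `path.Builder` as above): the ascending sort guarantees a moved parent is processed before its unseen children, so each child only needs one prefix lookup.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// "parent-prev" was renamed to "parent" in this delta; its children
	// didn't appear in the delta, so they only exist in the old prevPaths.
	moved := map[string]string{"/root/parent-prev": "/root/parent"}
	unseen := []struct{ id, prev string }{
		{"chld", "/root/parent-prev/prev/chld"},
		{"prev", "/root/parent-prev/prev"},
	}

	// ascending by path: parents sort before their children.
	sort.Slice(unseen, func(i, j int) bool { return unseen[i].prev < unseen[j].prev })

	results := map[string]string{}

	for _, un := range unseen {
		parent := un.prev[:strings.LastIndex(un.prev, "/")]

		if curr, ok := moved[parent]; ok {
			np := strings.Replace(un.prev, parent, curr, 1)
			results[un.id] = np
			moved[un.prev] = np // cascade the rename to deeper children
			continue
		}

		results[un.id] = un.prev // parent unmoved: keep the old path
	}

	fmt.Println(results)
	// map[chld:/root/parent/prev/chld prev:/root/parent/prev]
}
```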
func (face *folderyMcFolderFace) generateExcludeItemIDs() map[string]struct{} {
result := map[string]struct{}{}

File diff suppressed because it is too large.


@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
@ -214,13 +215,13 @@ func collWithMBHAndOpts(
func aPage(items ...models.DriveItemable) nextPage {
return nextPage{
Items: append([]models.DriveItemable{driveRootFolder()}, items...),
Items: append([]models.DriveItemable{rootFolder()}, items...),
}
}
func aPageWReset(items ...models.DriveItemable) nextPage {
return nextPage{
Items: append([]models.DriveItemable{driveRootFolder()}, items...),
Items: append([]models.DriveItemable{rootFolder()}, items...),
Reset: true,
}
}
@ -310,9 +311,15 @@ func aColl(
) *collectionAssertion {
ids := make([]string, 0, 2*len(fileIDs))
for _, fUD := range fileIDs {
ids = append(ids, fUD+metadata.DataFileSuffix)
ids = append(ids, fUD+metadata.MetaFileSuffix)
for _, fID := range fileIDs {
ids = append(ids, fID+metadata.DataFileSuffix)
ids = append(ids, fID+metadata.MetaFileSuffix)
}
// should expect all non-root, non-tombstone collections to contain
// a dir meta file for storing permissions.
if curr != nil && !strings.HasSuffix(curr.Folder(false), root) {
ids = append(ids, metadata.DirMetaFileSuffix)
}
return &collectionAssertion{
@ -453,9 +460,10 @@ func newTree(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
func treeWithRoot(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
tree := newFolderyMcFolderFace(defaultTreePfx(t, d), rootID)
root := custom.ToCustomDriveItem(rootFolder())
//nolint:forbidigo
err := tree.setFolder(context.Background(), "", rootID, rootName, false)
err := tree.setFolder(context.Background(), root)
require.NoError(t, err, clues.ToCore(err))
return tree
@ -477,9 +485,10 @@ func treeWithFoldersAfterReset(t *testing.T, d *deltaDrive) *folderyMcFolderFace
func treeWithTombstone(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
tree := treeWithRoot(t, d)
folder := custom.ToCustomDriveItem(d.folderAt(root))
//nolint:forbidigo
err := tree.setTombstone(context.Background(), folderID())
err := tree.setTombstone(context.Background(), folder)
require.NoError(t, err, clues.ToCore(err))
return tree
@ -487,13 +496,15 @@ func treeWithTombstone(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
func treeWithFolders(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
tree := treeWithRoot(t, d)
parent := custom.ToCustomDriveItem(d.folderAt(root, "parent"))
folder := custom.ToCustomDriveItem(d.folderAt("parent"))
//nolint:forbidigo
err := tree.setFolder(context.Background(), rootID, folderID("parent"), folderName("parent"), true)
err := tree.setFolder(context.Background(), parent)
require.NoError(t, err, clues.ToCore(err))
//nolint:forbidigo
err = tree.setFolder(context.Background(), folderID("parent"), folderID(), folderName(), false)
err = tree.setFolder(context.Background(), folder)
require.NoError(t, err, clues.ToCore(err))
return tree
@ -502,7 +513,8 @@ func treeWithFolders(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
func treeWithFileAtRoot(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
tree := treeWithRoot(t, d)
err := tree.addFile(rootID, fileID(), custom.ToCustomDriveItem(d.fileAtRoot()))
f := custom.ToCustomDriveItem(d.fileAt(root))
err := tree.addFile(f)
require.NoError(t, err, clues.ToCore(err))
return tree
@ -518,7 +530,8 @@ func treeWithDeletedFile(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
func treeWithFileInFolder(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
tree := treeWithFolders(t, d)
err := tree.addFile(folderID(), fileID(), custom.ToCustomDriveItem(d.fileAt(folder)))
f := custom.ToCustomDriveItem(d.fileAt(folder))
err := tree.addFile(f)
require.NoError(t, err, clues.ToCore(err))
return tree
@ -545,7 +558,7 @@ func fullTree(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
}
func fullTreeWithNames(
parentFolderX, tombstoneX any,
parentFolderSuffix, tombstoneSuffix any,
) func(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
return func(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
ctx, flush := tester.NewContext(t)
@ -553,56 +566,47 @@ func fullTreeWithNames(
tree := treeWithRoot(t, d)
// file in root
df := driveFile(d.dir(), rootID, "r")
err := tree.addFile(
rootID,
fileID("r"),
custom.ToCustomDriveItem(df))
// file "r" in root
df := custom.ToCustomDriveItem(d.fileAt(root, "r"))
err := tree.addFile(df)
require.NoError(t, err, clues.ToCore(err))
// root -> folderID(parentX)
err = tree.setFolder(ctx, rootID, folderID(parentFolderX), folderName(parentFolderX), false)
parent := custom.ToCustomDriveItem(d.folderAt(root, parentFolderSuffix))
err = tree.setFolder(ctx, parent)
require.NoError(t, err, clues.ToCore(err))
// file in folderID(parentX)
df = driveFile(d.dir(folderName(parentFolderX)), folderID(parentFolderX), "p")
err = tree.addFile(
folderID(parentFolderX),
fileID("p"),
custom.ToCustomDriveItem(df))
// file "p" in folderID(parentX)
df = custom.ToCustomDriveItem(d.fileAt(parentFolderSuffix, "p"))
err = tree.addFile(df)
require.NoError(t, err, clues.ToCore(err))
// folderID(parentX) -> folderID()
err = tree.setFolder(ctx, folderID(parentFolderX), folderID(), folderName(), false)
fld := custom.ToCustomDriveItem(d.folderAt(parentFolderSuffix))
err = tree.setFolder(ctx, fld)
require.NoError(t, err, clues.ToCore(err))
// file in folderID()
df = driveFile(d.dir(folderName()), folderID())
err = tree.addFile(
folderID(),
fileID(),
custom.ToCustomDriveItem(df))
// file "f" in folderID()
df = custom.ToCustomDriveItem(d.fileAt(folder, "f"))
err = tree.addFile(df)
require.NoError(t, err, clues.ToCore(err))
// tombstone - have to set a non-tombstone folder first,
// then add the item,
// then tombstone the folder
err = tree.setFolder(ctx, rootID, folderID(tombstoneX), folderName(tombstoneX), false)
tomb := custom.ToCustomDriveItem(d.folderAt(root, tombstoneSuffix))
err = tree.setFolder(ctx, tomb)
require.NoError(t, err, clues.ToCore(err))
// file in tombstone
df = driveFile(d.dir(folderName(tombstoneX)), folderID(tombstoneX), "t")
err = tree.addFile(
folderID(tombstoneX),
fileID("t"),
custom.ToCustomDriveItem(df))
// file "t" in tombstone
df = custom.ToCustomDriveItem(d.fileAt(tombstoneSuffix, "t"))
err = tree.addFile(df)
require.NoError(t, err, clues.ToCore(err))
err = tree.setTombstone(ctx, folderID(tombstoneX))
err = tree.setTombstone(ctx, tomb)
require.NoError(t, err, clues.ToCore(err))
// deleted file
// deleted file "d"
tree.deleteFile(fileID("d"))
return tree
@ -1355,23 +1359,25 @@ func (dd *deltaDrive) fileAt(
parentSuffix any,
fileSuffixes ...any,
) models.DriveItemable {
return driveItem(
fileID(fileSuffixes...),
fileName(fileSuffixes...),
dd.dir(folderName(parentSuffix)),
folderID(parentSuffix),
isFile)
}
func (dd *deltaDrive) fileAtRoot(
fileSuffixes ...any,
) models.DriveItemable {
if parentSuffix == root {
return driveItem(
fileID(fileSuffixes...),
fileName(fileSuffixes...),
dd.dir(),
rootID,
isFile)
}
return driveItem(
fileID(fileSuffixes...),
fileName(fileSuffixes...),
// the file's parent directory isn't used;
// this parameter is an artifact of the driveItem
// api and doesn't need to be populated for test
// success.
dd.dir(),
folderID(parentSuffix),
isFile)
}
func (dd *deltaDrive) fileWURLAtRoot(
@ -1391,10 +1397,12 @@ func (dd *deltaDrive) fileWURLAtRoot(
return di
}
func (dd *deltaDrive) fileWSizeAtRoot(
func (dd *deltaDrive) fileWSizeAt(
size int64,
parentSuffix any,
fileSuffixes ...any,
) models.DriveItemable {
if parentSuffix == root {
return driveItemWSize(
fileID(fileSuffixes...),
fileName(fileSuffixes...),
@ -1402,17 +1410,12 @@ func (dd *deltaDrive) fileWSizeAtRoot(
rootID,
size,
isFile)
}
}
func (dd *deltaDrive) fileWSizeAt(
size int64,
parentSuffix any,
fileSuffixes ...any,
) models.DriveItemable {
return driveItemWSize(
fileID(fileSuffixes...),
fileName(fileSuffixes...),
dd.dir(folderName(parentSuffix)),
dd.dir(),
folderID(parentSuffix),
size,
isFile)
@ -1442,9 +1445,9 @@ func driveFolder(
isFolder)
}
func driveRootFolder() models.DriveItemable {
func rootFolder() models.DriveItemable {
rootFolder := models.NewDriveItem()
rootFolder.SetName(ptr.To(rootName))
rootFolder.SetName(ptr.To(root))
rootFolder.SetId(ptr.To(rootID))
rootFolder.SetRoot(models.NewRoot())
rootFolder.SetFolder(models.NewFolder())
@ -1452,29 +1455,40 @@ func driveRootFolder() models.DriveItemable {
return rootFolder
}
func (dd *deltaDrive) folderAtRoot(
func (dd *deltaDrive) folderAt(
parentSuffix any,
folderSuffixes ...any,
) models.DriveItemable {
if parentSuffix == root {
return driveItem(
folderID(folderSuffixes...),
folderName(folderSuffixes...),
dd.dir(),
rootID,
isFolder)
}
}
func (dd *deltaDrive) folderAt(
parentSuffix any,
folderSuffixes ...any,
) models.DriveItemable {
return driveItem(
folderID(folderSuffixes...),
folderName(folderSuffixes...),
// we should be putting in the full location here, not just the
// parent suffix. But that full location would be unused because
// our unit tests don't utilize folder subselection (which is the
// only reason we need to provide the dir).
dd.dir(folderName(parentSuffix)),
folderID(parentSuffix),
isFolder)
}
func (dd *deltaDrive) packageAtRoot() models.DriveItemable {
return driveItem(
folderID(pkg),
folderName(pkg),
dd.dir(),
rootID,
isPackage)
}
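With `folderAtRoot`/`fileAtRoot` folded into `folderAt`/`fileAt`, pages in the tests read uniformly regardless of the parent; e.g., from the updated call sites above:

```go
aPage(
	d.folderAt(root),           // folder directly under the drive root
	d.folderAt(folder, "chld"), // folderID("chld") under folderID()
	d.fileAt("chld", "fchld"))  // fileID("fchld") inside folderID("chld")
```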
// ---------------------------------------------------------------------------
// id, name, path factories
// ---------------------------------------------------------------------------
@ -1482,6 +1496,16 @@ func (dd *deltaDrive) folderAt(
// assumption is only one suffix per id. Mostly using
// the variadic as an "optional" extension.
func id(v string, suffixes ...any) string {
if len(suffixes) > 1 {
// this should fail any test that hits it. We could pass in
// a testing.T and fail the call here instead, but that
// produces a whole lot of chaff, and returning this string
// still gets us the expected failure.
return fmt.Sprintf(
"too many suffixes in the ID; should only be 0 or 1, got %d",
len(suffixes))
}
id := fmt.Sprintf("id_%s", v)
// a bit weird, but acts as a quality of life
@ -1505,6 +1529,16 @@ func id(v string, suffixes ...any) string {
// assumption is only one suffix per name. Mostly using
// the variadic as an "optional" extension.
func name(v string, suffixes ...any) string {
if len(suffixes) > 1 {
// this should fail any test that hits it. We could pass in
// a testing.T and fail the call here instead, but that
// produces a whole lot of chaff, and returning this string
// still gets us the expected failure.
return fmt.Sprintf(
"too many suffixes in the Name; should only be 0 or 1, got %d",
len(suffixes))
}
name := fmt.Sprintf("n_%s", v)
// a bit weird, but acts as a quality of life
@ -1542,20 +1576,19 @@ func toPath(elems ...string) string {
}
// produces the full path for the provided drive
func (dd *deltaDrive) strPath(elems ...string) string {
return toPath(append(
[]string{
tenant,
path.OneDriveService.String(),
user,
path.FilesCategory.String(),
odConsts.DriveFolderPrefixBuilder(dd.id).String(),
},
elems...)...)
func (dd *deltaDrive) strPath(t *testing.T, elems ...string) string {
return dd.fullPath(t, elems...).String()
}
func (dd *deltaDrive) fullPath(t *testing.T, elems ...string) path.Path {
p, err := path.FromDataLayerPath(dd.strPath(elems...), false)
p, err := odConsts.DriveFolderPrefixBuilder(dd.id).
Append(elems...).
ToDataLayerPath(
tenant,
user,
path.OneDriveService,
path.FilesCategory,
false)
require.NoError(t, err, clues.ToCore(err))
return p
@ -1564,9 +1597,9 @@ func (dd *deltaDrive) fullPath(t *testing.T, elems ...string) path.Path {
// produces a complete path prefix up to the drive root folder with any
// elements passed in appended to the generated prefix.
func (dd *deltaDrive) dir(elems ...string) string {
return toPath(append(
[]string{odConsts.DriveFolderPrefixBuilder(dd.id).String()},
elems...)...)
return odConsts.DriveFolderPrefixBuilder(dd.id).
Append(elems...).
String()
}
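Since `strPath` is now defined as the string form of `fullPath`, both escape folder names identically. A sketch of the invariant, assuming the helpers in this file:

```go
func TestStrPathMatchesFullPath(t *testing.T) {
	d := drive()

	// both helpers escape "pa/rent" as a single element.
	p := d.fullPath(t, folderName("pa/rent"))
	assert.Equal(t, p.String(), d.strPath(t, folderName("pa/rent")))
}
```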
// common item names
@ -1583,7 +1616,7 @@ const (
nav = "nav"
pkg = "package"
rootID = odConsts.RootID
rootName = odConsts.RootPathDir
root = odConsts.RootPathDir
subfolder = "subfolder"
tenant = "t"
user = "u"


@ -6,7 +6,10 @@ import (
"github.com/alcionai/corso/src/pkg/control"
)
var errHitLimit = clues.New("hit limiter limits")
var (
errHitLimit = clues.New("hit limiter limits")
errHitCollectionLimit = clues.New("hit item limits within the current collection")
)
type driveEnumerationStats struct {
numPages int
@ -111,9 +114,22 @@ func (l pagerLimiter) hitItemLimit(itemCount int) bool {
return l.enabled() && itemCount >= l.limits.MaxItems
}
// hitTotalBytesLimit returns true if the limiter is enabled and has reached the limit
// alreadyHitTotalBytesLimit returns true if the limiter is enabled and has reached the limit
// for the accumulated byte size of all items (the file contents, not the item metadata)
// added to collections for this backup.
func (l pagerLimiter) hitTotalBytesLimit(i int64) bool {
return l.enabled() && i >= l.limits.MaxBytes
func (l pagerLimiter) alreadyHitTotalBytesLimit(i int64) bool {
return l.enabled() && i > l.limits.MaxBytes
}
// willStepOverBytesLimit returns true if the limiter is enabled and the current
// byte total plus the provided addition would exceed the limit plus some padding
// (the padding ensures the limit itself is always reachable).
func (l pagerLimiter) willStepOverBytesLimit(current, addition int64) bool {
if !l.enabled() {
return false
}
limitPlusPadding := int64(float64(l.limits.MaxBytes) * 1.03)
return (current + addition) > limitPlusPadding
}
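The 3% padding means a small file that nudges the total just past the limit is still admitted, while a grossly oversized file (the "1gb limit, 25gb file" case above) is not; a toy check with concrete numbers (hypothetical wrapper; the real method is above):

```go
package main

import "fmt"

// mirrors willStepOverBytesLimit: allow stepping slightly past the limit
// (3% padding) so the limit itself is always reachable.
func willStepOver(maxBytes, current, addition int64) bool {
	limitPlusPadding := int64(float64(maxBytes) * 1.03)
	return current+addition > limitPlusPadding
}

func main() {
	const gb = int64(1) << 30
	fmt.Println(willStepOver(gb, gb-10, 25*gb)) // true: skip the 25gb file
	fmt.Println(willStepOver(gb, gb-10, 20))    // false: within the padding
}
```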


@ -34,9 +34,14 @@ type backupLimitTest struct {
// Collection name -> set of item IDs. We can't check item data because
// that's not mocked out. Metadata is checked separately.
expectedItemIDsInCollection map[string][]string
// Collection name -> set of item IDs. We can't check item data because
// that's not mocked out. Metadata is checked separately.
// the tree version has some different (more accurate) expectations
// for success
expectedItemIDsInCollectionTree map[string][]string
}
func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
func backupLimitTable(t *testing.T, d1, d2 *deltaDrive) []backupLimitTest {
return []backupLimitTest{
{
name: "OneDrive SinglePage ExcludeItemsOverMaxSize",
@ -50,12 +55,13 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
},
enumerator: driveEnumerator(
d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage(
d1.fileWSizeAtRoot(7, "f1"),
d1.fileWSizeAtRoot(1, "f2"),
d1.fileWSizeAtRoot(1, "f3"))))),
delta(id(deltaURL), nil).with(
aPage(
d1.fileWSizeAt(7, root, "f1"),
d1.fileWSizeAt(1, root, "f2"),
d1.fileWSizeAt(1, root, "f3"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f2"), fileID("f3")},
d1.strPath(t): {fileID("f2"), fileID("f3")},
},
},
{
@ -70,12 +76,13 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
},
enumerator: driveEnumerator(
d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage(
d1.fileWSizeAtRoot(1, "f1"),
d1.fileWSizeAtRoot(2, "f2"),
d1.fileWSizeAtRoot(1, "f3"))))),
delta(id(deltaURL), nil).with(
aPage(
d1.fileWSizeAt(1, root, "f1"),
d1.fileWSizeAt(2, root, "f2"),
d1.fileWSizeAt(1, root, "f3"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")},
d1.strPath(t): {fileID("f1"), fileID("f2")},
},
},
{
@ -90,14 +97,15 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
},
enumerator: driveEnumerator(
d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage(
d1.fileWSizeAtRoot(1, "f1"),
d1.folderAtRoot(),
delta(id(deltaURL), nil).with(
aPage(
d1.fileWSizeAt(1, root, "f1"),
d1.folderAt(root),
d1.fileWSizeAt(2, folder, "f2"),
d1.fileWSizeAt(1, folder, "f3"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1")},
d1.strPath(folderName()): {folderID(), fileID("f2")},
d1.strPath(t): {fileID("f1")},
d1.strPath(t, folderName()): {folderID(), fileID("f2")},
},
},
{
@ -112,15 +120,16 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
},
enumerator: driveEnumerator(
d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
d1.fileAtRoot("f3"),
d1.fileAtRoot("f4"),
d1.fileAtRoot("f5"),
d1.fileAtRoot("f6"))))),
delta(id(deltaURL), nil).with(
aPage(
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
d1.fileAt(root, "f3"),
d1.fileAt(root, "f4"),
d1.fileAt(root, "f5"),
d1.fileAt(root, "f6"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
},
},
{
@ -137,19 +146,19 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2")),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2")),
aPage(
// Repeated items shouldn't count against the limit.
d1.fileAtRoot("f1"),
d1.folderAtRoot(),
d1.fileAt(root, "f1"),
d1.folderAt(root),
d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"),
d1.fileAt(folder, "f6"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")},
d1.strPath(folderName()): {folderID(), fileID("f3")},
d1.strPath(t): {fileID("f1"), fileID("f2")},
d1.strPath(t, folderName()): {folderID(), fileID("f3")},
},
},
{
@ -166,16 +175,16 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2")),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"),
d1.fileAt(folder, "f6"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")},
d1.strPath(t): {fileID("f1"), fileID("f2")},
},
},
{
@ -192,18 +201,22 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
d1.fileAtRoot("f3")),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
d1.fileAt(root, "f3")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"))))),
expectedItemIDsInCollection: map[string][]string{
// Root has an additional item. It's hard to fix that in the code
// though.
d1.strPath(): {fileID("f1"), fileID("f2")},
d1.strPath(folderName()): {folderID(), fileID("f4")},
// Root has an additional item. It's hard to fix that in the code though.
d1.strPath(t): {fileID("f1"), fileID("f2")},
d1.strPath(t, folderName()): {folderID(), fileID("f4")},
},
expectedItemIDsInCollectionTree: map[string][]string{
// The tree version doesn't have this problem.
d1.strPath(t): {fileID("f1")},
d1.strPath(t, folderName()): {folderID(), fileID("f4")},
},
},
{
@ -220,18 +233,18 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f1"),
d1.fileAt(folder, "f2")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
// Updated item that shouldn't count against the limit a second time.
d1.fileAt(folder, "f2"),
d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {},
d1.strPath(folderName()): {folderID(), fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(t): {},
d1.strPath(t, folderName()): {folderID(), fileID("f1"), fileID("f2"), fileID("f3")},
},
},
{
@ -248,19 +261,26 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
// Put folder 0 at limit.
d1.folderAtRoot(),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
// Put root/folder at limit.
d1.folderAt(root),
d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
// Try to move an item from root to folder 0, which is already at the limit.
d1.fileAt(folder, "f1"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")},
d1.strPath(folderName()): {folderID(), fileID("f3"), fileID("f4")},
d1.strPath(t): {fileID("f1"), fileID("f2")},
d1.strPath(t, folderName()): {folderID(), fileID("f3"), fileID("f4")},
},
expectedItemIDsInCollectionTree: map[string][]string{
d1.strPath(t): {fileID("f2")},
// Note that the tree version allows f1 to be moved. We've already
// committed to backing up the file as part of the preview, so it
// doesn't seem rational to prevent its movement.
d1.strPath(t, folderName()): {folderID(), fileID("f3"), fileID("f4"), fileID("f1")},
},
},
{
@ -277,18 +297,18 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
d1.fileAtRoot("f3")),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
d1.fileAt(root, "f3")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f4")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f5"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")},
d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
},
},
{
@ -305,21 +325,21 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
d1.fileAtRoot("f3")),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
d1.fileAt(root, "f3")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"),
// This container shouldn't be returned.
d1.folderAtRoot(2),
d1.folderAt(root, 2),
d1.fileAt(2, "f7"),
d1.fileAt(2, "f8"),
d1.fileAt(2, "f9"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")},
d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
},
},
{
@ -336,22 +356,22 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
d1.fileAtRoot("f3")),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
d1.fileAt(root, "f3")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5")),
aPage(
// This container shouldn't be returned.
d1.folderAtRoot(2),
d1.folderAt(root, 2),
d1.fileAt(2, "f7"),
d1.fileAt(2, "f8"),
d1.fileAt(2, "f9"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")},
d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
},
},
{
@ -366,22 +386,24 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
},
enumerator: driveEnumerator(
d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
d1.fileAtRoot("f3"),
d1.fileAtRoot("f4"),
d1.fileAtRoot("f5")))),
delta(id(deltaURL), nil).with(
aPage(
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
d1.fileAt(root, "f3"),
d1.fileAt(root, "f4"),
d1.fileAt(root, "f5")))),
d2.newEnumer().with(
delta(id(deltaURL), nil).with(aPage(
d2.fileAtRoot("f1"),
d2.fileAtRoot("f2"),
d2.fileAtRoot("f3"),
d2.fileAtRoot("f4"),
d2.fileAtRoot("f5"))))),
delta(id(deltaURL), nil).with(
aPage(
d2.fileAt(root, "f1"),
d2.fileAt(root, "f2"),
d2.fileAt(root, "f3"),
d2.fileAt(root, "f4"),
d2.fileAt(root, "f5"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")},
d2.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d2.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
},
},
{
@ -397,18 +419,18 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with(
delta(id(deltaURL), nil).with(
aPage(
d1.fileAtRoot("f1"),
d1.fileAtRoot("f2"),
d1.fileAtRoot("f3")),
d1.fileAt(root, "f1"),
d1.fileAt(root, "f2"),
d1.fileAt(root, "f3")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f4")),
aPage(
d1.folderAtRoot(),
d1.folderAt(root),
d1.fileAt(folder, "f5"))))),
expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")},
d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
},
},
}
@ -427,8 +449,6 @@ func (suite *LimiterUnitSuite) TestGet_PreviewLimits_noTree() {
// checks that don't examine metadata, collection states, etc. They really just
// check that the expected items appear.
func (suite *LimiterUnitSuite) TestGet_PreviewLimits_tree() {
suite.T().Skip("TODO: unskip when tree produces collections")
opts := control.DefaultOptions()
opts.ToggleFeatures.UseDeltaTree = true
@ -441,7 +461,7 @@ func iterGetPreviewLimitsTests(
) {
d1, d2 := drive(), drive(2)
for _, test := range backupLimitTable(d1, d2) {
for _, test := range backupLimitTable(suite.T(), d1, d2) {
suite.Run(test.name, func() {
runGetPreviewLimits(
suite.T(),
@ -521,9 +541,15 @@ func runGetPreviewLimits(
itemIDs = append(itemIDs, id)
}
expectItemIDs := test.expectedItemIDsInCollection[folderPath]
if opts.ToggleFeatures.UseDeltaTree && test.expectedItemIDsInCollectionTree != nil {
expectItemIDs = test.expectedItemIDsInCollectionTree[folderPath]
}
assert.ElementsMatchf(
t,
test.expectedItemIDsInCollection[folderPath],
expectItemIDs,
itemIDs,
"item IDs in collection with path:\n\t%q",
folderPath)
@ -542,6 +568,9 @@ type defaultLimitTestExpects struct {
numItems int
numContainers int
numItemsPerContainer int
// The tree handling behavior may deviate under certain conditions,
// since it allows one file to step slightly over the byte limit
// (see the sketch after this struct).
numItemsTreePadding int
}
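
To see why the tree padding amounts to a single item, here's a hedged simulation of the two byte-limit checks (assumed values: a 100 MiB limit and uniform 1 MiB files, matching the MaxBytes/1024/1024 arithmetic used in the defaults table below):

package main

import "fmt"

func main() {
	maxBytes := int64(100 << 20) // assumed 100 MiB preview limit
	fileSize := int64(1 << 20)   // assumed uniform 1 MiB files
	limitPlusPadding := int64(float64(maxBytes) * 1.03)

	var current int64
	count := 0

	for {
		// alreadyHitTotalBytesLimit: enumeration stops once the running
		// total strictly exceeds the limit, so at most one file overshoots.
		if current > maxBytes {
			break
		}
		// willStepOverBytesLimit: in the real code this skips only the one
		// oversized file; with uniform sizes, stopping is equivalent.
		if current+fileSize > limitPlusPadding {
			break
		}

		current += fileSize
		count++
	}

	fmt.Println(count) // 101: the 100 files within the limit, plus 1 padding file
}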
type defaultLimitTest struct {
@ -641,6 +670,7 @@ func defaultLimitsTable() []defaultLimitTest {
numItems: int(defaultPreviewMaxBytes) / 1024 / 1024,
numContainers: 1,
numItemsPerContainer: int(defaultPreviewMaxBytes) / 1024 / 1024,
numItemsTreePadding: 1,
},
},
}
@ -666,8 +696,6 @@ func (suite *LimiterUnitSuite) TestGet_PreviewLimits_defaultsNoTree() {
// These tests run a reduced set of checks that really just look for item counts
// and such. Other tests are expected to provide more comprehensive checks.
func (suite *LimiterUnitSuite) TestGet_PreviewLimits_defaultsWithTree() {
suite.T().Skip("TODO: unskip when tree produces collections")
opts := control.DefaultOptions()
opts.ToggleFeatures.UseDeltaTree = true
@ -714,7 +742,7 @@ func runGetPreviewLimitsDefaults(
for containerIdx := 0; containerIdx < test.numContainers; containerIdx++ {
page := nextPage{
Items: []models.DriveItemable{
driveRootFolder(),
rootFolder(),
driveItem(
folderID(containerIdx),
folderName(containerIdx),
@ -798,11 +826,16 @@ func runGetPreviewLimitsDefaults(
numItems += len(col.driveItems)
// Add one to account for the folder permissions item.
expected := test.expect.numItemsPerContainer + 1
if opts.ToggleFeatures.UseDeltaTree {
expected += test.expect.numItemsTreePadding
}
assert.Len(
t,
col.driveItems,
test.expect.numItemsPerContainer+1,
"items in container %v",
expected,
"number of items in collection at:\n\t%+v",
col.FullPath())
}
@ -810,12 +843,18 @@ func runGetPreviewLimitsDefaults(
t,
test.expect.numContainers,
numContainers,
"total containers")
"total count of collections")
// Add one item per container to account for the folder permissions item.
expected := test.expect.numItems + test.expect.numContainers
if opts.ToggleFeatures.UseDeltaTree {
expected += test.expect.numItemsTreePadding
}
// Each container also gets an item so account for that here.
assert.Equal(
t,
test.expect.numItems+test.expect.numContainers,
expected,
numItems,
"total items across all containers")
"total sum of item counts in all collections")
}

View File

@ -509,7 +509,7 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
pages: []nextPage{
aPage(
d.fileWURLAtRoot(aURL(1), false, 1),
d.folderAtRoot(2)),
d.folderAt(root, 2)),
},
expectedItemProps: map[string]itemProps{
fileID(2): {},

View File

@ -33,6 +33,15 @@ type DriveItem struct {
additionalData map[string]any
}
func NewDriveItem(
id, name string,
) *DriveItem {
return &DriveItem{
id: ptr.To(id),
name: ptr.To(name),
}
}
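
A hypothetical usage sketch for the new constructor, written as an in-package test (the package name and assertion style are assumptions; GetId is the getter defined below):

package mock

import "testing"

func TestNewDriveItem(t *testing.T) {
	item := NewDriveItem("item-id", "item-name")

	// GetId returns *string in the graph SDK style, so check for nil
	// before dereferencing.
	got := item.GetId()
	if got == nil || *got != "item-id" {
		t.Errorf("unexpected id: got %v, want item-id", got)
	}
}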
// Disable the revive linter, since we want to follow the naming scheme used
// by the graph SDK here.
// nolint: revive
func (c *DriveItem) GetId() *string {