fix up test fullPath generation (#4833)

fixes up fullPath and previous-path generation in drive testing so that paths are built correctly and consistently, even for folder names that contain slashes and therefore require escaping.
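For context on the failure mode being tested: when a folder name itself contains a path separator, naively joining names with `/` produces a string that parses back into the wrong elements. The sketch below is illustrative only; it uses the standard library rather than this repo's `path.Builder`, and the helper names are invented for the example.

```go
package main

import (
	"fmt"
	"strings"
)

// escapeElement escapes separators inside a single folder name so that
// a name like "pa/rent" can't be mistaken for two path elements.
// (Illustrative; the real escaping lives in the repo's path package.)
func escapeElement(elem string) string {
	return strings.ReplaceAll(elem, "/", `\/`)
}

// joinPath escapes each element before joining, keeping the in-name
// separator as data rather than structure.
func joinPath(elems ...string) string {
	escaped := make([]string, len(elems))
	for i, e := range elems {
		escaped[i] = escapeElement(e)
	}

	return "/" + strings.Join(escaped, "/")
}

func main() {
	// naive join: "pa/rent" reads back as two folders, "pa" and "rent"
	fmt.Println("/" + strings.Join([]string{"root:", "pa/rent"}, "/")) // /root:/pa/rent
	// escaped join: still a single folder named "pa/rent"
	fmt.Println(joinPath("root:", "pa/rent")) // /root:/pa\/rent
}
```

This is why the tests below thread a `*testing.T` into the path helpers: building a path is now an operation that can fail, and the test should fail with it.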

---

#### Does this PR need a docs update or release note?

- [x]  No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Issue(s)

* #4689

#### Test Plan

- [x]  Unit test
Keepers committed 2023-12-15 14:44:26 -07:00 (via GitHub)
parent e3363aaa46, commit 44078e1db2
10 changed files with 1667 additions and 1012 deletions

File diff suppressed because it is too large.

@@ -221,6 +221,7 @@ func (c *Collections) makeDriveCollections(
         ctx,
         tree,
         drv,
+        prevPaths,
         prevDeltaLink,
         countPagesInDelta,
         errs)
@@ -457,6 +458,14 @@ func (c *Collections) enumeratePageOfItems(
                 return err
             }

+            // special case: we only want to add a limited number of files
+            // to each collection. But if one collection fills up, we don't
+            // want to break out of the whole backup. That allows us to preview
+            // many folders with a small selection of files in each.
+            if errors.Is(err, errHitCollectionLimit) {
+                continue
+            }
+
            el.AddRecoverable(ictx, clues.Wrap(err, "adding folder"))
        }
    }
@@ -479,8 +488,6 @@ func (c *Collections) addFolderToTree(
        isDeleted   = folder.GetDeleted() != nil
        isMalware   = folder.GetMalware() != nil
        isPkg       = folder.GetPackageEscaped() != nil
-        parent      = folder.GetParentReference()
-        parentID    string
        notSelected bool
    )
@@ -489,10 +496,6 @@
        return nil, errHitLimit
    }

-    if parent != nil {
-        parentID = ptr.Val(parent.GetId())
-    }
-
    defer func() {
        switch {
        case notSelected:
@@ -525,7 +528,7 @@
    }

    if isDeleted {
-        err := tree.setTombstone(ctx, folderID)
+        err := tree.setTombstone(ctx, folder)
        return nil, clues.Stack(err).OrNil()
    }
@@ -541,7 +544,7 @@
        return nil, nil
    }

-    err = tree.setFolder(ctx, parentID, folderID, folderName, isPkg)
+    err = tree.setFolder(ctx, folder)
    return nil, clues.Stack(err).OrNil()
}
@@ -635,22 +638,32 @@ func (c *Collections) addFileToTree(
    if parentNotNil && !alreadySeen {
        countSize := tree.countLiveFilesAndSizes()

-        // Don't add new items if the new collection has already reached its limit.
-        // item moves and updates are generally allowed through.
-        if limiter.atContainerItemsLimit(len(parentNode.files)) || limiter.hitItemLimit(countSize.numFiles) {
+        // Tell the enumerator to exit if we've already hit the total
+        // limit of bytes or items in this backup.
+        if limiter.alreadyHitTotalBytesLimit(countSize.totalBytes) ||
+            limiter.hitItemLimit(countSize.numFiles) {
            return nil, errHitLimit
        }

-        // Skip large files that don't fit within the size limit.
-        // unlike the other checks, which see if we're already at the limit, this check
-        // needs to be forward-facing to ensure we don't go far over the limit.
+        // Don't add new items if the new collection has already reached its limit.
+        // Item moves and updates are generally allowed through.
+        if limiter.atContainerItemsLimit(len(parentNode.files)) {
+            return nil, errHitCollectionLimit
+        }
+
+        // Don't include large files that don't fit within the size limit.
+        // Unlike the other checks, which see if we're already at the limit,
+        // this check needs to be forward-facing to ensure we don't go far
+        // over the limit.
        // Example case: a 1gb limit and a 25gb file.
-        if limiter.hitTotalBytesLimit(fileSize + countSize.totalBytes) {
-            return nil, errHitLimit
+        if limiter.willStepOverBytesLimit(countSize.totalBytes, fileSize) {
+            // don't return errHitLimit here; we only want to skip the
+            // current file. We may not want to skip files after it.
+            return nil, nil
        }
    }

-    err := tree.addFile(parentID, fileID, file)
+    err := tree.addFile(file)
    if err != nil {
        return nil, clues.StackWC(ctx, err)
    }
@@ -776,6 +789,7 @@ func (c *Collections) turnTreeIntoCollections(
    ctx context.Context,
    tree *folderyMcFolderFace,
    drv models.Driveable,
+    prevPaths map[string]string,
    prevDeltaLink string,
    countPagesInDelta int,
    errs *fault.Bus,
@@ -792,12 +806,11 @@
    }

    var (
        collections = []data.BackupCollection{}
-        newPrevPaths = map[string]string{}
-        uc           *urlCache
-        el           = errs.Local()
-        driveID      = ptr.Val(drv.GetId())
-        driveName    = ptr.Val(drv.GetName())
+        uc          *urlCache
+        el          = errs.Local()
+        driveID     = ptr.Val(drv.GetId())
+        driveName   = ptr.Val(drv.GetName())
    )

    // Attach an url cache to the drive if the number of discovered items is
@@ -825,15 +838,11 @@
        }
    }

-    for id, cbl := range collectables {
+    for _, cbl := range collectables {
        if el.Failure() != nil {
            break
        }

-        if cbl.currPath != nil {
-            newPrevPaths[id] = cbl.currPath.String()
-        }
-
        coll, err := NewCollection(
            c.handler,
            c.protectedResource,
@@ -856,5 +865,16 @@
        collections = append(collections, coll)
    }

-    return collections, newPrevPaths, tree.generateExcludeItemIDs(), el.Failure()
+    if el.Failure() != nil {
+        return nil, nil, nil, el.Failure()
+    }
+
+    // use the collectables and old previous paths
+    // to generate new previous paths
+    newPrevPaths, err := tree.generateNewPreviousPaths(collectables, prevPaths)
+    if err != nil {
+        return nil, nil, nil, clues.WrapWC(ctx, err, "generating new previous paths")
+    }
+
+    return collections, newPrevPaths, tree.generateExcludeItemIDs(), nil
}
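The hunk above distinguishes a backward-facing check (are we already at the byte cap?) from a forward-facing one (would adding this file step over it?). A minimal sketch of what those limiter methods plausibly look like; the real `pagerLimiter` lives elsewhere in the package, so the field and method shapes here are assumptions for illustration:

```go
// pagerLimiter is an assumed shape; only the two byte checks matter here.
type pagerLimiter struct {
	maxBytes int64
}

// alreadyHitTotalBytesLimit is backward-facing: it reports whether the
// bytes accumulated so far have reached the cap, in which case the
// whole enumeration should stop (errHitLimit).
func (l pagerLimiter) alreadyHitTotalBytesLimit(current int64) bool {
	return current >= l.maxBytes
}

// willStepOverBytesLimit is forward-facing: it reports whether adding
// the next file would push the total past the cap. That lets the caller
// skip one oversized file (e.g. a 25gb file against a 1gb cap) without
// aborting the rest of the backup, which is exactly why the new code
// returns nil instead of errHitLimit in that branch.
func (l pagerLimiter) willStepOverBytesLimit(current, add int64) bool {
	return current+add > l.maxBytes
}
```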

@@ -237,6 +237,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_GetTree() {
 // to ensure we stitch the parts together correctly.
 func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
    d := drive()
+    t := suite.T()

    table := []struct {
        name string
@@ -265,7 +266,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
                    delta(id(deltaURL), nil).with(
                        aPage()))),
            prevPaths: map[string]string{
-                folderID(): d.strPath(folderName()),
+                folderID(): d.strPath(t, folderName()),
            },
            expectCounts: countTD.Expected{
                count.PrevPaths: 1,
@@ -277,7 +278,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
            enumerator: driveEnumerator(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
-                        aPage(d.folderAtRoot(), d.fileAt(folder))))),
+                        aPage(
+                            d.folderAt(root),
+                            d.fileAt(folder))))),
            prevPaths: map[string]string{},
            expectCounts: countTD.Expected{
                count.PrevPaths: 0,
@@ -289,9 +292,11 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
            enumerator: driveEnumerator(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
-                        aPage(d.folderAtRoot(), d.fileAt(folder))))),
+                        aPage(
+                            d.folderAt(root),
+                            d.fileAt(folder))))),
            prevPaths: map[string]string{
-                folderID(): d.strPath(folderName()),
+                folderID(): d.strPath(t, folderName()),
            },
            expectCounts: countTD.Expected{
                count.PrevPaths: 1,
@@ -319,7 +324,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
                        aReset(),
                        aPage()))),
            prevPaths: map[string]string{
-                folderID(): d.strPath(folderName()),
+                folderID(): d.strPath(t, folderName()),
            },
            expectCounts: countTD.Expected{
                count.PrevPaths: 1,
@@ -332,7 +337,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
                d.newEnumer().with(
                    deltaWReset(id(deltaURL), nil).with(
                        aReset(),
-                        aPage(d.folderAtRoot(), d.fileAt(folder))))),
+                        aPage(
+                            d.folderAt(root),
+                            d.fileAt(folder))))),
            prevPaths: map[string]string{},
            expectCounts: countTD.Expected{
                count.PrevPaths: 0,
@@ -345,9 +352,11 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
                d.newEnumer().with(
                    deltaWReset(id(deltaURL), nil).with(
                        aReset(),
-                        aPage(d.folderAtRoot(), d.fileAt(folder))))),
+                        aPage(
+                            d.folderAt(root),
+                            d.fileAt(folder))))),
            prevPaths: map[string]string{
-                folderID(): d.strPath(folderName()),
+                folderID(): d.strPath(t, folderName()),
            },
            expectCounts: countTD.Expected{
                count.PrevPaths: 1,
@@ -384,6 +393,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeDriveCollections() {
 func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors() {
    d := drive()
+    t := suite.T()

    table := []struct {
        name string
@@ -395,8 +405,8 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
            name: "no error - normal usage",
            tree: treeWithFolders,
            prevPaths: map[string]string{
-                folderID("parent"): d.strPath(folderName("parent")),
-                folderID():         d.strPath(folderName("parent"), folderName()),
+                folderID("parent"): d.strPath(t, folderName("parent")),
+                folderID():         d.strPath(t, folderName("parent"), folderName()),
            },
            expectErr: require.NoError,
        },
@@ -410,7 +420,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
            name: "no error - folder not visited in this delta",
            tree: treeWithFolders,
            prevPaths: map[string]string{
-                id("santa"): d.strPath(name("santa")),
+                id("santa"): d.strPath(t, name("santa")),
            },
            expectErr: require.NoError,
        },
@@ -418,7 +428,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
            name: "empty key in previous paths",
            tree: treeWithFolders,
            prevPaths: map[string]string{
-                "": d.strPath(folderName("parent")),
+                "": d.strPath(t, folderName("parent")),
            },
            expectErr: require.Error,
        },
@@ -460,6 +470,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddPrevPathsToTree_errors
 func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections() {
    d := drive()
+    t := suite.T()

    type expected struct {
        prevPaths map[string]string
@@ -481,9 +492,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
            enableURLCache: true,
            expect: expected{
                prevPaths: map[string]string{
-                    rootID:             d.strPath(),
-                    folderID("parent"): d.strPath(folderName("parent")),
-                    folderID():         d.strPath(folderName("parent"), folderName()),
+                    rootID:             d.strPath(t),
+                    folderID("parent"): d.strPath(t, folderName("parent")),
+                    folderID():         d.strPath(t, folderName("parent"), folderName()),
                },
                collections: func(t *testing.T, d *deltaDrive) expectedCollections {
                    return expectCollections(
@@ -500,13 +511,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
                        aColl(
                            d.fullPath(t, folderName("parent"), folderName()),
                            nil,
-                            fileID()))
+                            fileID("f")))
                },
                globalExcludedFileIDs: makeExcludeMap(
                    fileID("r"),
                    fileID("p"),
                    fileID("d"),
-                    fileID()),
+                    fileID("f")),
            },
        },
        {
@@ -514,16 +525,20 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
            tree:           fullTree,
            enableURLCache: true,
            prevPaths: map[string]string{
-                rootID:                d.strPath(),
-                folderID("parent"):    d.strPath(folderName("parent-prev")),
-                folderID():            d.strPath(folderName("parent-prev"), folderName()),
-                folderID("tombstone"): d.strPath(folderName("tombstone-prev")),
+                rootID:                d.strPath(t),
+                folderID("parent"):    d.strPath(t, folderName("parent-prev")),
+                folderID():            d.strPath(t, folderName("parent-prev"), folderName()),
+                folderID("prev"):      d.strPath(t, folderName("parent-prev"), folderName("prev")),
+                folderID("prev-chld"): d.strPath(t, folderName("parent-prev"), folderName("prev"), folderName("prev-chld")),
+                folderID("tombstone"): d.strPath(t, folderName("tombstone-prev")),
            },
            expect: expected{
                prevPaths: map[string]string{
-                    rootID:             d.strPath(),
-                    folderID("parent"): d.strPath(folderName("parent")),
-                    folderID():         d.strPath(folderName("parent"), folderName()),
+                    rootID:                d.strPath(t),
+                    folderID("parent"):    d.strPath(t, folderName("parent")),
+                    folderID():            d.strPath(t, folderName("parent"), folderName()),
+                    folderID("prev"):      d.strPath(t, folderName("parent"), folderName("prev")),
+                    folderID("prev-chld"): d.strPath(t, folderName("parent"), folderName("prev"), folderName("prev-chld")),
                },
                collections: func(t *testing.T, d *deltaDrive) expectedCollections {
                    return expectCollections(
@@ -540,31 +555,35 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
                        aColl(
                            d.fullPath(t, folderName("parent"), folderName()),
                            d.fullPath(t, folderName("parent-prev"), folderName()),
-                            fileID()),
+                            fileID("f")),
                        aColl(nil, d.fullPath(t, folderName("tombstone-prev"))))
                },
                globalExcludedFileIDs: makeExcludeMap(
                    fileID("r"),
                    fileID("p"),
                    fileID("d"),
-                    fileID()),
+                    fileID("f")),
            },
        },
        {
-            name:           "all folders moved - todo: path separator string check",
-            tree:           fullTreeWithNames("parent", "tombstone"),
+            name:           "all folders moved - path separator string check",
+            tree:           fullTreeWithNames("pa/rent", "to/mbstone"),
            enableURLCache: true,
            prevPaths: map[string]string{
-                rootID:                d.strPath(),
-                folderID("parent"):    d.strPath(folderName("parent-prev")),
-                folderID():            d.strPath(folderName("parent-prev"), folderName()),
-                folderID("tombstone"): d.strPath(folderName("tombstone-prev")),
+                rootID:                 d.strPath(t),
+                folderID("pa/rent"):    d.strPath(t, folderName("parent/prev")),
+                folderID():             d.strPath(t, folderName("parent/prev"), folderName()),
+                folderID("pr/ev"):      d.strPath(t, folderName("parent/prev"), folderName("pr/ev")),
+                folderID("prev/chld"):  d.strPath(t, folderName("parent/prev"), folderName("pr/ev"), folderName("prev/chld")),
+                folderID("to/mbstone"): d.strPath(t, folderName("tombstone/prev")),
            },
            expect: expected{
                prevPaths: map[string]string{
-                    rootID:             d.strPath(),
-                    folderID("parent"): d.strPath(folderName("parent")),
-                    folderID():         d.strPath(folderName("parent"), folderName()),
+                    rootID:                d.strPath(t),
+                    folderID("pa/rent"):   d.strPath(t, folderName("pa/rent")),
+                    folderID():            d.strPath(t, folderName("pa/rent"), folderName()),
+                    folderID("pr/ev"):     d.strPath(t, folderName("pa/rent"), folderName("pr/ev")),
+                    folderID("prev/chld"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev"), folderName("prev/chld")),
                },
                collections: func(t *testing.T, d *deltaDrive) expectedCollections {
                    return expectCollections(
@@ -575,37 +594,45 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
                            d.fullPath(t),
                            fileID("r")),
                        aColl(
-                            d.fullPath(t, folderName("parent")),
-                            d.fullPath(t, folderName("parent-prev")),
+                            d.fullPath(t, folderName("pa/rent")),
+                            d.fullPath(t, folderName("parent/prev")),
                            fileID("p")),
                        aColl(
-                            d.fullPath(t, folderName("parent"), folderName()),
-                            d.fullPath(t, folderName("parent-prev"), folderName()),
-                            fileID()),
-                        aColl(nil, d.fullPath(t, folderName("tombstone-prev"))))
+                            d.fullPath(t, folderName("pa/rent"), folderName()),
+                            d.fullPath(t, folderName("parent/prev"), folderName()),
+                            fileID("f")),
+                        aColl(nil, d.fullPath(t, folderName("tombstone/prev"))))
                },
                globalExcludedFileIDs: makeExcludeMap(
                    fileID("r"),
                    fileID("p"),
                    fileID("d"),
-                    fileID()),
+                    fileID("f")),
            },
        },
        {
-            name:           "no folders moved",
+            name: "nothing in the tree was moved " +
+                "but there were some folders in the previous paths that " +
+                "didn't appear in the delta so those have to appear in the " +
+                "new previous paths but those weren't moved either so " +
+                "everything should have the same path at the end",
            tree:           fullTree,
            enableURLCache: true,
            prevPaths: map[string]string{
-                rootID:                d.strPath(),
-                folderID("parent"):    d.strPath(folderName("parent")),
-                folderID():            d.strPath(folderName("parent"), folderName()),
-                folderID("tombstone"): d.strPath(folderName("tombstone")),
+                rootID:                d.strPath(t),
+                folderID("parent"):    d.strPath(t, folderName("parent")),
+                folderID():            d.strPath(t, folderName("parent"), folderName()),
+                folderID("tombstone"): d.strPath(t, folderName("tombstone")),
+                folderID("prev"):      d.strPath(t, folderName("prev")),
+                folderID("prev-chld"): d.strPath(t, folderName("prev"), folderName("prev-chld")),
            },
            expect: expected{
                prevPaths: map[string]string{
-                    rootID:             d.strPath(),
-                    folderID("parent"): d.strPath(folderName("parent")),
-                    folderID():         d.strPath(folderName("parent"), folderName()),
+                    rootID:                d.strPath(t),
+                    folderID("parent"):    d.strPath(t, folderName("parent")),
+                    folderID():            d.strPath(t, folderName("parent"), folderName()),
+                    folderID("prev"):      d.strPath(t, folderName("prev")),
+                    folderID("prev-chld"): d.strPath(t, folderName("prev"), folderName("prev-chld")),
                },
                collections: func(t *testing.T, d *deltaDrive) expectedCollections {
                    return expectCollections(
@@ -622,14 +649,64 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
                        aColl(
                            d.fullPath(t, folderName("parent"), folderName()),
                            d.fullPath(t, folderName("parent"), folderName()),
-                            fileID()),
+                            fileID("f")),
                        aColl(nil, d.fullPath(t, folderName("tombstone"))))
                },
                globalExcludedFileIDs: makeExcludeMap(
                    fileID("r"),
                    fileID("p"),
                    fileID("d"),
-                    fileID()),
+                    fileID("f")),
+            },
+        },
+        {
+            name: "nothing in the tree was moved " +
+                "but there were some folders in the previous paths that " +
+                "didn't appear in the delta so those have to appear in the " +
+                "new previous paths but those weren't moved either so " +
+                "everything should have the same path at the end " +
+                "- the version with path separator chars in the directory names",
+            tree:           fullTreeWithNames("pa/rent", "to/mbstone"),
+            enableURLCache: true,
+            prevPaths: map[string]string{
+                rootID:                 d.strPath(t),
+                folderID("pa/rent"):    d.strPath(t, folderName("pa/rent")),
+                folderID():             d.strPath(t, folderName("pa/rent"), folderName()),
+                folderID("pr/ev"):      d.strPath(t, folderName("pa/rent"), folderName("pr/ev")),
+                folderID("prev/chld"):  d.strPath(t, folderName("pa/rent"), folderName("pr/ev"), folderName("prev/chld")),
+                folderID("to/mbstone"): d.strPath(t, folderName("to/mbstone")),
+            },
+            expect: expected{
+                prevPaths: map[string]string{
+                    rootID:                d.strPath(t),
+                    folderID("pa/rent"):   d.strPath(t, folderName("pa/rent")),
+                    folderID():            d.strPath(t, folderName("pa/rent"), folderName()),
+                    folderID("pr/ev"):     d.strPath(t, folderName("pa/rent"), folderName("pr/ev")),
+                    folderID("prev/chld"): d.strPath(t, folderName("pa/rent"), folderName("pr/ev"), folderName("prev/chld")),
+                },
+                collections: func(t *testing.T, d *deltaDrive) expectedCollections {
+                    return expectCollections(
+                        false,
+                        true,
+                        aColl(
+                            d.fullPath(t),
+                            d.fullPath(t),
+                            fileID("r")),
+                        aColl(
+                            d.fullPath(t, folderName("pa/rent")),
+                            d.fullPath(t, folderName("pa/rent")),
+                            fileID("p")),
+                        aColl(
+                            d.fullPath(t, folderName("pa/rent"), folderName()),
+                            d.fullPath(t, folderName("pa/rent"), folderName()),
+                            fileID("f")),
+                        aColl(nil, d.fullPath(t, folderName("to/mbstone"))))
+                },
+                globalExcludedFileIDs: makeExcludeMap(
+                    fileID("r"),
+                    fileID("p"),
+                    fileID("d"),
+                    fileID("f")),
            },
        },
    }
@@ -656,6 +733,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_TurnTreeIntoCollections()
                ctx,
                tree,
                d.able,
+                test.prevPaths,
                deltaURL,
                countPages,
                fault.New(true))
@@ -782,10 +860,10 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
            enumerator: driveEnumerator(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
-                        aPage(d.folderAtRoot()),
-                        aPage(d.folderAtRoot("sib")),
+                        aPage(d.folderAt(root)),
+                        aPage(d.folderAt(root, "sib")),
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.folderAt(folder, "chld"))))),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: populateTreeExpected{
@@ -815,13 +893,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder)),
                        aPage(
-                            d.folderAtRoot("sib"),
+                            d.folderAt(root, "sib"),
                            d.fileAt("sib", "fsib")),
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.folderAt(folder, "chld"),
                            d.fileAt("chld", "fchld"))))),
            limiter: newPagerLimiter(control.DefaultOptions()),
@@ -917,7 +995,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder)),
                        aPage(delItem(folderID(), rootID, isFolder))))),
            limiter: newPagerLimiter(control.DefaultOptions()),
@@ -950,7 +1028,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
                        aPage(
-                            d.folderAtRoot("parent"),
+                            d.folderAt(root, "parent"),
                            driveItem(folderID(), folderName("moved"), d.dir(), folderID("parent"), isFolder),
                            driveFile(d.dir(folderName("parent"), folderName()), folderID())),
                        aPage(delItem(folderID(), folderID("parent"), isFolder))))),
@@ -986,7 +1064,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
                    delta(id(deltaURL), nil).with(
                        aPage(delItem(folderID(), rootID, isFolder)),
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder))))),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: populateTreeExpected{
@@ -1018,7 +1096,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
                    delta(id(deltaURL), nil).with(
                        aPage(delItem(folderID(), rootID, isFolder)),
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder))))),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: populateTreeExpected{
@@ -1049,13 +1127,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder)),
                        aPage(
-                            d.folderAtRoot("sib"),
+                            d.folderAt(root, "sib"),
                            d.fileAt("sib", "fsib")),
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.folderAt(folder, "chld"),
                            d.fileAt("chld", "fchld"))))),
            limiter: newPagerLimiter(minimumLimitOpts()),
@@ -1085,13 +1163,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_singleDelta(
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder)),
                        aPage(
-                            d.folderAtRoot("sib"),
+                            d.folderAt(root, "sib"),
                            d.fileAt("sib", "fsib")),
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.folderAt(folder, "chld"),
                            d.fileAt("chld", "fchld"))))),
            limiter: newPagerLimiter(minimumLimitOpts()),
@@ -1136,15 +1214,15 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
                d.newEnumer().with(
                    delta(id(deltaURL), nil).
                        with(aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder))),
                    delta(id(deltaURL), nil).
                        with(aPage(
-                            d.folderAtRoot("sib"),
+                            d.folderAt(root, "sib"),
                            d.fileAt("sib", "fsib"))),
                    delta(id(deltaURL), nil).
                        with(aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.folderAt(folder, "chld"),
                            d.fileAt("chld", "fchld"))))),
            limiter: newPagerLimiter(control.DefaultOptions()),
@@ -1182,7 +1260,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder))),
                    // a (delete,create) pair in the same delta can occur when
                    // a user deletes and restores an item in-between deltas.
@@ -1191,7 +1269,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
                        delItem(folderID(), rootID, isFolder),
                        delItem(fileID(), folderID(), isFile)),
                    aPage(
-                        d.folderAtRoot(),
+                        d.folderAt(root),
                        d.fileAt(folder))))),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: populateTreeExpected{
@@ -1222,7 +1300,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
                d.newEnumer().with(
                    delta(id(deltaURL), nil).with(
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder))),
                    delta(id(deltaURL), nil).with(
                        aPage(
@@ -1260,7 +1338,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_PopulateTree_multiDelta()
                    delta(id(deltaURL), nil).with(
                        // first page: create /root/folder and /root/folder/file
                        aPage(
-                            d.folderAtRoot(),
+                            d.folderAt(root),
                            d.fileAt(folder)),
                        // assume the user makes changes at this point:
                        // * create a new /root/folder
@@ -1442,9 +1520,9 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
            name: "many folders in a hierarchy",
            tree: treeWithRoot,
            page: aPage(
-                d.folderAtRoot(),
-                d.folderAtRoot("sib"),
-                d.folderAt(folder, "chld")),
+                d.folderAt(root),
+                d.folderAt(folder, "chld"),
+                d.folderAt(root, "sib")),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: expected{
                counts: countTD.Expected{
@@ -1465,7 +1543,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
            name: "create->delete",
            tree: treeWithRoot,
            page: aPage(
-                d.folderAtRoot(),
+                d.folderAt(root),
                delItem(folderID(), rootID, isFolder)),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: expected{
@@ -1485,7 +1563,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
            name: "move->delete",
            tree: treeWithFolders,
            page: aPage(
-                d.folderAtRoot("parent"),
+                d.folderAt(root, "parent"),
                driveItem(folderID(), folderName("moved"), d.dir(folderName("parent")), folderID("parent"), isFolder),
                delItem(folderID(), folderID("parent"), isFolder)),
            limiter: newPagerLimiter(control.DefaultOptions()),
@@ -1510,7 +1588,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
            tree: treeWithRoot,
            page: aPage(
                delItem(folderID(), rootID, isFolder),
-                d.folderAtRoot()),
+                d.folderAt(root)),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: expected{
                counts: countTD.Expected{
@@ -1531,7 +1609,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_fold
            tree: treeWithRoot,
            page: aPage(
                delItem(folderID(), rootID, isFolder),
-                d.folderAtRoot()),
+                d.folderAt(root)),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: expected{
                counts: countTD.Expected{
@@ -1596,7 +1674,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFolderToTree() {
    var (
        d      = drive()
-        fld    = custom.ToCustomDriveItem(d.folderAtRoot())
+        fld    = custom.ToCustomDriveItem(d.folderAt(root))
        subFld = custom.ToCustomDriveItem(driveFolder(d.dir(folderName("parent")), folderID("parent")))
        pack   = custom.ToCustomDriveItem(driveItem(id(pkg), name(pkg), d.dir(), rootID, isPackage))
        del    = custom.ToCustomDriveItem(delItem(folderID(), rootID, isFolder))
@@ -1871,13 +1949,13 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_MakeFolderCollectionPath(
    }{
        {
            name:      "root",
-            folder:    driveRootFolder(),
+            folder:    rootFolder(),
            expect:    basePath.String(),
            expectErr: require.NoError,
        },
        {
            name:      "folder",
-            folder:    d.folderAtRoot(),
+            folder:    d.folderAt(root),
            expect:    folderPath.String(),
            expectErr: require.NoError,
        },
@@ -1935,7 +2013,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
        {
            name: "one file at root",
            tree: treeWithRoot,
-            page: aPage(d.fileAtRoot()),
+            page: aPage(d.fileAt(root)),
            expect: expected{
                counts: countTD.Expected{
                    count.TotalDeleteFilesProcessed: 0,
@@ -1954,8 +2032,8 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
            name: "many files in a hierarchy",
            tree: treeWithRoot,
            page: aPage(
-                d.fileAtRoot(),
-                d.folderAtRoot(),
+                d.fileAt(root),
+                d.folderAt(root),
                d.fileAt(folder, "fchld")),
            expect: expected{
                counts: countTD.Expected{
@@ -1976,7 +2054,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
            name: "many updates to the same file",
            tree: treeWithRoot,
            page: aPage(
-                d.fileAtRoot(),
+                d.fileAt(root),
                driveItem(fileID(), fileName(1), d.dir(), rootID, isFile),
                driveItem(fileID(), fileName(2), d.dir(), rootID, isFile)),
            expect: expected{
@@ -2031,7 +2109,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
            name: "create->delete",
            tree: treeWithRoot,
            page: aPage(
-                d.fileAtRoot(),
+                d.fileAt(root),
                delItem(fileID(), rootID, isFile)),
            expect: expected{
                counts: countTD.Expected{
@@ -2049,7 +2127,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
            name: "move->delete",
            tree: treeWithFileAtRoot,
            page: aPage(
-                d.folderAtRoot(),
+                d.folderAt(root),
                d.fileAt(folder),
                delItem(fileID(), folderID(), isFile)),
            expect: expected{
@@ -2069,7 +2147,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
            tree: treeWithFileAtRoot,
            page: aPage(
                delItem(fileID(), rootID, isFile),
-                d.fileAtRoot()),
+                d.fileAt(root)),
            expect: expected{
                counts: countTD.Expected{
                    count.TotalDeleteFilesProcessed: 1,
@@ -2089,7 +2167,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
            tree: treeWithRoot,
            page: aPage(
                delItem(fileID(), rootID, isFile),
-                d.fileAtRoot()),
+                d.fileAt(root)),
            expect: expected{
                counts: countTD.Expected{
                    count.TotalDeleteFilesProcessed: 1,
@@ -2140,10 +2218,18 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_EnumeratePageOfItems_file
 func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
    d := drive()

+    unlimitedItemsPerContainer := newPagerLimiter(minimumLimitOpts())
+    unlimitedItemsPerContainer.limits.MaxItemsPerContainer = 9001
+
+    unlimitedTotalBytesAndFiles := newPagerLimiter(minimumLimitOpts())
+    unlimitedTotalBytesAndFiles.limits.MaxBytes = 9001
+    unlimitedTotalBytesAndFiles.limits.MaxItems = 9001
+
    type expected struct {
        counts                        countTD.Expected
        err                           require.ErrorAssertionFunc
        shouldHitLimit                bool
+        shouldHitCollLimit            bool
        skipped                       assert.ValueAssertionFunc
        treeContainsFileIDsWithParent map[string]string
        countLiveFiles                int
@@ -2160,7 +2246,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
        {
            name:    "add new file",
            tree:    treeWithRoot,
-            file:    d.fileAtRoot(),
+            file:    d.fileAt(root),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: expected{
                counts: countTD.Expected{
@@ -2178,7 +2264,7 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
        {
            name:    "duplicate file",
            tree:    treeWithFileAtRoot,
-            file:    d.fileAtRoot(),
+            file:    d.fileAt(root),
            limiter: newPagerLimiter(control.DefaultOptions()),
            expect: expected{
                counts: countTD.Expected{
@@ -2260,8 +2346,45 @@ func (suite *CollectionsTreeUnitSuite) TestCollections_AddFileToTree() {
        {
            name:    "already at container file limit",
            tree:    treeWithFileAtRoot,
-            file:    d.fileAtRoot(2),
-            limiter: newPagerLimiter(minimumLimitOpts()),
+            file:    d.fileAt(root, 2),
+            limiter: unlimitedTotalBytesAndFiles,
+            expect: expected{
+                counts: countTD.Expected{
+                    count.TotalFilesProcessed: 1,
+                },
+                err:                require.Error,
+                shouldHitCollLimit: true,
+                skipped:            assert.Nil,
+                treeContainsFileIDsWithParent: map[string]string{
+                    fileID(): rootID,
+                },
+                countLiveFiles:  1,
+                countTotalBytes: defaultFileSize,
+            },
+        },
+        {
+            name:    "goes over total byte limit",
+            tree:    treeWithRoot,
+            file:    d.fileAt(root),
+            limiter: unlimitedItemsPerContainer,
+            expect: expected{
+                counts: countTD.Expected{
+                    count.TotalFilesProcessed: 1,
+                },
+                // no error here, since byte limit shouldn't
+                // make the func return an error.
+                err:                           require.NoError,
+                skipped:                       assert.Nil,
+                treeContainsFileIDsWithParent: map[string]string{},
+                countLiveFiles:                0,
+                countTotalBytes:               0,
+            },
+        },
+        {
+            name:    "already over total byte limit",
+            tree:    treeWithFileAtRoot,
+            file:    d.fileAt(root, 2),
+            limiter: unlimitedItemsPerContainer,
            expect: expected{
                counts: countTD.Expected{
                    count.TotalFilesProcessed: 1,
@@ -2276,23 +2399,6 @@
                countTotalBytes: defaultFileSize,
            },
        },
-        {
-            name:    "goes over total byte limit",
-            tree:    treeWithRoot,
-            file:    d.fileAtRoot(),
-            limiter: newPagerLimiter(minimumLimitOpts()),
-            expect: expected{
-                counts: countTD.Expected{
-                    count.TotalFilesProcessed: 1,
-                },
-                err:                           require.Error,
-                shouldHitLimit:                true,
-                skipped:                       assert.Nil,
-                treeContainsFileIDsWithParent: map[string]string{},
-                countLiveFiles:                0,
-                countTotalBytes:               0,
-            },
-        },
    }

    for _, test := range table {
        suite.Run(test.name, func() {
@@ -2318,6 +2424,10 @@
            test.expect.err(t, err, clues.ToCore(err))
            test.expect.skipped(t, skipped)

+            if test.expect.shouldHitCollLimit {
+                require.ErrorIs(t, err, errHitCollectionLimit, clues.ToCore(err))
+            }
+
            if test.expect.shouldHitLimit {
                require.ErrorIs(t, err, errHitLimit, clues.ToCore(err))
            }

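The tests above switched every `d.strPath(folderName())` call to `d.strPath(t, folderName())`. The helper's own diff is suppressed above, so the following is only an assumed shape, shown to explain why the `*testing.T` parameter was added: path construction now goes through the escaping builder and can fail, and the helper should fail the test rather than return a malformed string.

```go
// Assumed shape of the updated test helper (the real deltaDrive.strPath
// lives in a file whose diff is suppressed above). fullPath is expected
// to escape each folder element and require.NoError on any failure.
func (d *deltaDrive) strPath(t *testing.T, folderElems ...string) string {
	return d.fullPath(t, folderElems...).String()
}
```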
@@ -2,8 +2,11 @@ package drive

 import (
    "context"
+    "sort"
+    "strings"

    "github.com/alcionai/clues"
+    "golang.org/x/exp/maps"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
@@ -78,33 +81,29 @@ type nodeyMcNodeFace struct {
    // required for mid-enumeration folder moves, else we have to walk
    // the tree completely to remove the node from its old parent.
    parent *nodeyMcNodeFace
-    // the microsoft item ID. Mostly because we might as well
-    // attach that to the node if we're also attaching the dir.
-    id string
-    // single directory name, not a path
-    name string
+    // folder is the actual drive item for this directory.
+    // we save this so that, during post-processing, it can
+    // get moved into the collection files, which will cause
+    // the collection processor to generate a permissions
+    // metadata file for the folder.
+    folder *custom.DriveItem
    // contains the complete previous path
    prev path.Path
    // folderID -> node
    children map[string]*nodeyMcNodeFace
    // file item ID -> file metadata
    files map[string]*custom.DriveItem
-    // for special handling protocols around packages
-    isPackage bool
 }

 func newNodeyMcNodeFace(
    parent *nodeyMcNodeFace,
-    id, name string,
-    isPackage bool,
+    folder *custom.DriveItem,
 ) *nodeyMcNodeFace {
    return &nodeyMcNodeFace{
-        parent:    parent,
-        id:        id,
-        name:      name,
-        children:  map[string]*nodeyMcNodeFace{},
-        files:     map[string]*custom.DriveItem{},
-        isPackage: isPackage,
+        parent:   parent,
+        folder:   folder,
+        children: map[string]*nodeyMcNodeFace{},
+        files:    map[string]*custom.DriveItem{},
    }
 }
@@ -134,9 +133,14 @@ func (face *folderyMcFolderFace) getNode(id string) *nodeyMcNodeFace {
 // values are updated to match (isPackage is assumed not to change).
 func (face *folderyMcFolderFace) setFolder(
    ctx context.Context,
-    parentID, id, name string,
-    isPackage bool,
+    folder *custom.DriveItem,
 ) error {
+    var (
+        id           = ptr.Val(folder.GetId())
+        name         = ptr.Val(folder.GetName())
+        parentFolder = folder.GetParentReference()
+    )
+
    // need to ensure we have the minimum requirements met for adding a node.
    if len(id) == 0 {
        return clues.NewWC(ctx, "missing folder ID")
@@ -146,16 +150,20 @@ func (face *folderyMcFolderFace) setFolder(
        return clues.NewWC(ctx, "missing folder name")
    }

-    if len(parentID) == 0 && id != face.rootID {
+    if (parentFolder == nil || len(ptr.Val(parentFolder.GetId())) == 0) &&
+        id != face.rootID {
        return clues.NewWC(ctx, "non-root folder missing parent id")
    }

    // only set the root node once.
    if id == face.rootID {
        if face.root == nil {
-            root := newNodeyMcNodeFace(nil, id, name, isPackage)
+            root := newNodeyMcNodeFace(nil, folder)
            face.root = root
            face.folderIDToNode[id] = root
+        } else {
+            // but update the folder each time, to stay in sync with changes
+            face.root.folder = folder
        }

        return nil
@@ -167,7 +175,7 @@ func (face *folderyMcFolderFace) setFolder(
    // 3. existing folder migrated to new location.
    // 4. tombstoned folder restored.

-    parent, ok := face.folderIDToNode[parentID]
+    parentNode, ok := face.folderIDToNode[ptr.Val(parentFolder.GetId())]
    if !ok {
        return clues.NewWC(ctx, "folder added before parent")
    }
@@ -184,9 +192,9 @@ func (face *folderyMcFolderFace) setFolder(
    if zombey, tombstoned := face.tombstones[id]; tombstoned {
        delete(face.tombstones, id)

-        zombey.parent = parent
-        zombey.name = name
-        parent.children[id] = zombey
+        zombey.parent = parentNode
+        zombey.folder = folder
+        parentNode.children[id] = zombey
        face.folderIDToNode[id] = zombey

        return nil
@@ -204,21 +212,21 @@ func (face *folderyMcFolderFace) setFolder(
            // technically shouldn't be possible but better to keep the problem tracked
            // just in case.
            logger.Ctx(ctx).Info("non-root folder already exists with no parent ref")
-        } else if nodey.parent != parent {
+        } else if nodey.parent != parentNode {
            // change type 3. we need to ensure the old parent stops pointing to this node.
            delete(nodey.parent.children, id)
        }

-        nodey.name = name
-        nodey.parent = parent
+        nodey.parent = parentNode
+        nodey.folder = folder
    } else {
        // change type 1: new addition
-        nodey = newNodeyMcNodeFace(parent, id, name, isPackage)
+        nodey = newNodeyMcNodeFace(parentNode, folder)
    }

    // ensure the parent points to this node, and that the node is registered
    // in the map of all nodes in the tree.
-    parent.children[id] = nodey
+    parentNode.children[id] = nodey
    face.folderIDToNode[id] = nodey

    return nil
@@ -226,8 +234,10 @@ func (face *folderyMcFolderFace) setFolder(
 func (face *folderyMcFolderFace) setTombstone(
    ctx context.Context,
-    id string,
+    folder *custom.DriveItem,
 ) error {
+    id := ptr.Val(folder.GetId())
+
    if len(id) == 0 {
        return clues.NewWC(ctx, "missing tombstone folder ID")
    }
@@ -254,7 +264,7 @@ func (face *folderyMcFolderFace) setTombstone(
    }

    if _, alreadyBuried := face.tombstones[id]; !alreadyBuried {
-        face.tombstones[id] = newNodeyMcNodeFace(nil, id, "", false)
+        face.tombstones[id] = newNodeyMcNodeFace(nil, folder)
    }

    return nil
@@ -298,7 +308,7 @@ func (face *folderyMcFolderFace) setPreviousPath(
        return nil
    }

-    zombey := newNodeyMcNodeFace(nil, folderID, "", false)
+    zombey := newNodeyMcNodeFace(nil, custom.NewDriveItem(folderID, ""))
    zombey.prev = prev
    face.tombstones[folderID] = zombey
@@ -318,13 +328,20 @@ func (face *folderyMcFolderFace) hasFile(id string) bool {
 // file was already added to the tree and is getting relocated,
 // this func will update and/or clean up all the old references.
 func (face *folderyMcFolderFace) addFile(
-    parentID, id string,
    file *custom.DriveItem,
 ) error {
-    if len(parentID) == 0 {
+    var (
+        parentFolder = file.GetParentReference()
+        id           = ptr.Val(file.GetId())
+        parentID     string
+    )
+
+    if parentFolder == nil || len(ptr.Val(parentFolder.GetId())) == 0 {
        return clues.New("item added without parent folder ID")
    }

+    parentID = ptr.Val(parentFolder.GetId())
+
    if len(id) == 0 {
        return clues.New("item added without ID")
    }
@@ -419,17 +436,22 @@ func (face *folderyMcFolderFace) walkTreeAndBuildCollections(
        return nil
    }

-    isRoot := node == face.root
+    var (
+        id        = ptr.Val(node.folder.GetId())
+        name      = ptr.Val(node.folder.GetName())
+        isPackage = node.folder.GetPackageEscaped() != nil
+        isRoot    = node == face.root
+    )

    if !isRoot {
-        location = location.Append(node.name)
+        location = location.Append(name)
    }

    for _, child := range node.children {
        err := face.walkTreeAndBuildCollections(
            child,
            location,
-            node.isPackage || isChildOfPackage,
+            isPackage || isChildOfPackage,
            result)
        if err != nil {
            return err
@@ -444,19 +466,134 @@ func (face *folderyMcFolderFace) walkTreeAndBuildCollections(
            "path_suffix", location.Elements())
    }

+    files := node.files
+
+    if !isRoot {
+        // add the folder itself to the list of files inside the folder.
+        // that will cause the collection processor to generate a metadata
+        // file to hold the folder's permissions.
+        files = maps.Clone(node.files)
+        files[id] = node.folder
+    }
+
    cbl := collectable{
        currPath:                  collectionPath,
-        files:                     node.files,
-        folderID:                  node.id,
-        isPackageOrChildOfPackage: node.isPackage || isChildOfPackage,
+        files:                     files,
+        folderID:                  id,
+        isPackageOrChildOfPackage: isPackage || isChildOfPackage,
        prevPath:                  node.prev,
    }

-    result[node.id] = cbl
+    result[id] = cbl

    return nil
 }

+type idPrevPathTup struct {
+    id       string
+    prevPath string
+}
+
+// fuses the collectables and old prevPaths into a
+// new prevPaths map.
+func (face *folderyMcFolderFace) generateNewPreviousPaths(
+    collectables map[string]collectable,
+    prevPaths map[string]string,
+) (map[string]string, error) {
+    var (
+        // id -> currentPath
+        results = map[string]string{}
+        // prevPath -> currentPath
+        movedPaths = map[string]string{}
+        // prevPath -> {}
+        tombstoned = map[string]struct{}{}
+    )
+
+    // first, move all collectables into the new maps
+    for id, cbl := range collectables {
+        if cbl.currPath == nil {
+            tombstoned[cbl.prevPath.String()] = struct{}{}
+            continue
+        }
+
+        cp := cbl.currPath.String()
+        results[id] = cp
+
+        if cbl.prevPath != nil && cbl.prevPath.String() != cp {
+            movedPaths[cbl.prevPath.String()] = cp
+        }
+    }
+
+    // next, create a slice of tuples representing any
+    // old prevPath entry whose ID isn't already bound to
+    // a collectable.
+    unseenPrevPaths := []idPrevPathTup{}
+
+    for id, p := range prevPaths {
+        // if the current folder was tombstoned, skip it
+        if _, ok := tombstoned[p]; ok {
+            continue
+        }
+
+        if _, ok := results[id]; !ok {
+            unseenPrevPaths = append(unseenPrevPaths, idPrevPathTup{id, p})
+        }
+    }
+
+    // sort the slice by path, ascending.
+    // This ensures we work from root to leaf when replacing prefixes,
+    // and thus we won't need to walk every unseen path from leaf to
+    // root looking for a matching prefix.
+    sortByLeastPath := func(i, j int) bool {
+        return unseenPrevPaths[i].prevPath < unseenPrevPaths[j].prevPath
+    }
+
+    sort.Slice(unseenPrevPaths, sortByLeastPath)
+
+    for _, un := range unseenPrevPaths {
+        elems := path.NewElements(un.prevPath)
+
+        pb, err := path.Builder{}.UnescapeAndAppend(elems...)
+        if err != nil {
+            return nil, err
+        }
+
+        parent := pb.Dir().String()
+
+        // if the parent was tombstoned, add this prevPath entry to the
+        // tombstoned map; that'll allow the tombstone identification to
+        // cascade to children, and it won't get added to the results.
+        if _, ok := tombstoned[parent]; ok {
+            tombstoned[un.prevPath] = struct{}{}
+            continue
+        }
+
+        // if the parent wasn't moved, add the same path to the result set
+        parentCurrentPath, ok := movedPaths[parent]
+        if !ok {
+            results[un.id] = un.prevPath
+            continue
+        }
+
+        // if the parent was moved, replace the prefix and
+        // add it to the result set.
+        // TODO: should probably use path.UpdateParent for this.
+        // but I want the quality-of-life of feeding it strings
+        // instead of parsing strings to paths here first.
+        newPath := strings.Replace(un.prevPath, parent, parentCurrentPath, 1)
+        results[un.id] = newPath
+
+        // add the current string to the moved list, that'll allow it to cascade to all children.
+        movedPaths[un.prevPath] = newPath
+    }
+
+    return results, nil
+}
+
 func (face *folderyMcFolderFace) generateExcludeItemIDs() map[string]struct{} {
    result := map[string]struct{}{}
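To make the root-to-leaf prefix cascade in `generateNewPreviousPaths` concrete, here is a self-contained restatement of the idea using plain strings. It deliberately ignores tombstones and element escaping, and the function and variable names are invented for the example; only the sort-then-replace mechanic mirrors the code above.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// cascade applies recorded folder moves (old path -> new path) to
// previous-path entries that weren't seen in the current delta.
func cascade(moved map[string]string, unseen map[string]string) map[string]string {
	results := map[string]string{}

	type tup struct{ id, prev string }

	tups := make([]tup, 0, len(unseen))
	for id, p := range unseen {
		tups = append(tups, tup{id, p})
	}

	// ascending order puts parents before children, so a parent's move
	// is recorded in `moved` before any of its children are examined.
	sort.Slice(tups, func(i, j int) bool { return tups[i].prev < tups[j].prev })

	for _, un := range tups {
		idx := strings.LastIndex(un.prev, "/")
		if idx < 0 {
			results[un.id] = un.prev
			continue
		}

		parent := un.prev[:idx]

		if newParent, ok := moved[parent]; ok {
			np := strings.Replace(un.prev, parent, newParent, 1)
			results[un.id] = np
			moved[un.prev] = np // let the move cascade to grandchildren
		} else {
			results[un.id] = un.prev // parent didn't move; keep the old path
		}
	}

	return results
}

func main() {
	moved := map[string]string{"/root:/parent-prev": "/root:/parent"}
	unseen := map[string]string{
		"child":  "/root:/parent-prev/child",
		"gchild": "/root:/parent-prev/child/gchild",
	}

	fmt.Println(cascade(moved, unseen))
	// map[child:/root:/parent/child gchild:/root:/parent/child/gchild]
}
```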

File diff suppressed because it is too large.

@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"
    "net/http"
+    "strings"
    "testing"
    "time"
@@ -214,13 +215,13 @@ func collWithMBHAndOpts(
 func aPage(items ...models.DriveItemable) nextPage {
    return nextPage{
-        Items: append([]models.DriveItemable{driveRootFolder()}, items...),
+        Items: append([]models.DriveItemable{rootFolder()}, items...),
    }
 }

 func aPageWReset(items ...models.DriveItemable) nextPage {
    return nextPage{
-        Items: append([]models.DriveItemable{driveRootFolder()}, items...),
+        Items: append([]models.DriveItemable{rootFolder()}, items...),
        Reset: true,
    }
 }
@@ -310,9 +311,15 @@ func aColl(
 ) *collectionAssertion {
    ids := make([]string, 0, 2*len(fileIDs))

-    for _, fUD := range fileIDs {
-        ids = append(ids, fUD+metadata.DataFileSuffix)
-        ids = append(ids, fUD+metadata.MetaFileSuffix)
+    for _, fID := range fileIDs {
+        ids = append(ids, fID+metadata.DataFileSuffix)
+        ids = append(ids, fID+metadata.MetaFileSuffix)
+    }
+
+    // should expect all non-root, non-tombstone collections to contain
+    // a dir meta file for storing permissions.
+    if curr != nil && !strings.HasSuffix(curr.Folder(false), root) {
+        ids = append(ids, metadata.DirMetaFileSuffix)
    }

    return &collectionAssertion{
@@ -453,9 +460,10 @@ func newTree(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
 func treeWithRoot(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
    tree := newFolderyMcFolderFace(defaultTreePfx(t, d), rootID)
+    root := custom.ToCustomDriveItem(rootFolder())

    //nolint:forbidigo
-    err := tree.setFolder(context.Background(), "", rootID, rootName, false)
+    err := tree.setFolder(context.Background(), root)
    require.NoError(t, err, clues.ToCore(err))

    return tree
@@ -477,9 +485,10 @@ func treeWithFoldersAfterReset(t *testing.T, d *deltaDrive) *folderyMcFolderFace
 func treeWithTombstone(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
    tree := treeWithRoot(t, d)
+    folder := custom.ToCustomDriveItem(d.folderAt(root))

    //nolint:forbidigo
-    err := tree.setTombstone(context.Background(), folderID())
+    err := tree.setTombstone(context.Background(), folder)
    require.NoError(t, err, clues.ToCore(err))

    return tree
@@ -487,13 +496,15 @@ func treeWithTombstone(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
 func treeWithFolders(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
    tree := treeWithRoot(t, d)
+    parent := custom.ToCustomDriveItem(d.folderAt(root, "parent"))
+    folder := custom.ToCustomDriveItem(d.folderAt("parent"))

    //nolint:forbidigo
-    err := tree.setFolder(context.Background(), rootID, folderID("parent"), folderName("parent"), true)
+    err := tree.setFolder(context.Background(), parent)
    require.NoError(t, err, clues.ToCore(err))

    //nolint:forbidigo
-    err = tree.setFolder(context.Background(), folderID("parent"), folderID(), folderName(), false)
+    err = tree.setFolder(context.Background(), folder)
    require.NoError(t, err, clues.ToCore(err))

    return tree
@@ -502,7 +513,8 @@ func treeWithFolders(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
 func treeWithFileAtRoot(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
    tree := treeWithRoot(t, d)

-    err := tree.addFile(rootID, fileID(), custom.ToCustomDriveItem(d.fileAtRoot()))
+    f := custom.ToCustomDriveItem(d.fileAt(root))
+    err := tree.addFile(f)
    require.NoError(t, err, clues.ToCore(err))

    return tree
@@ -518,7 +530,8 @@ func treeWithDeletedFile(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
 func treeWithFileInFolder(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
    tree := treeWithFolders(t, d)

-    err := tree.addFile(folderID(), fileID(), custom.ToCustomDriveItem(d.fileAt(folder)))
+    f := custom.ToCustomDriveItem(d.fileAt(folder))
+    err := tree.addFile(f)
    require.NoError(t, err, clues.ToCore(err))

    return tree
@@ -545,7 +558,7 @@ func fullTree(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
 }

 func fullTreeWithNames(
-    parentFolderX, tombstoneX any,
+    parentFolderSuffix, tombstoneSuffix any,
 ) func(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
    return func(t *testing.T, d *deltaDrive) *folderyMcFolderFace {
        ctx, flush := tester.NewContext(t)
@@ -553,56 +566,47 @@ func fullTreeWithNames(
        tree := treeWithRoot(t, d)

-        // file in root
-        df := driveFile(d.dir(), rootID, "r")
-        err := tree.addFile(
-            rootID,
-            fileID("r"),
-            custom.ToCustomDriveItem(df))
+        // file "r" in root
+        df := custom.ToCustomDriveItem(d.fileAt(root, "r"))
+        err := tree.addFile(df)
        require.NoError(t, err, clues.ToCore(err))

        // root -> folderID(parentX)
-        err = tree.setFolder(ctx, rootID, folderID(parentFolderX), folderName(parentFolderX), false)
+        parent := custom.ToCustomDriveItem(d.folderAt(root, parentFolderSuffix))
+        err = tree.setFolder(ctx, parent)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
// file in folderID(parentX) // file "p" in folderID(parentX)
df = driveFile(d.dir(folderName(parentFolderX)), folderID(parentFolderX), "p") df = custom.ToCustomDriveItem(d.fileAt(parentFolderSuffix, "p"))
err = tree.addFile( err = tree.addFile(df)
folderID(parentFolderX),
fileID("p"),
custom.ToCustomDriveItem(df))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
// folderID(parentX) -> folderID() // folderID(parentX) -> folderID()
err = tree.setFolder(ctx, folderID(parentFolderX), folderID(), folderName(), false) fld := custom.ToCustomDriveItem(d.folderAt(parentFolderSuffix))
err = tree.setFolder(ctx, fld)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
// file in folderID() // file "f" in folderID()
df = driveFile(d.dir(folderName()), folderID()) df = custom.ToCustomDriveItem(d.fileAt(folder, "f"))
err = tree.addFile( err = tree.addFile(df)
folderID(),
fileID(),
custom.ToCustomDriveItem(df))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
// tombstone - have to set a non-tombstone folder first, // tombstone - have to set a non-tombstone folder first,
// then add the item, // then add the item,
// then tombstone the folder // then tombstone the folder
err = tree.setFolder(ctx, rootID, folderID(tombstoneX), folderName(tombstoneX), false) tomb := custom.ToCustomDriveItem(d.folderAt(root, tombstoneSuffix))
err = tree.setFolder(ctx, tomb)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
// file in tombstone // file "t" in tombstone
df = driveFile(d.dir(folderName(tombstoneX)), folderID(tombstoneX), "t") df = custom.ToCustomDriveItem(d.fileAt(tombstoneSuffix, "t"))
err = tree.addFile( err = tree.addFile(df)
folderID(tombstoneX),
fileID("t"),
custom.ToCustomDriveItem(df))
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
err = tree.setTombstone(ctx, folderID(tombstoneX)) err = tree.setTombstone(ctx, tomb)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
// deleted file // deleted file "d"
tree.deleteFile(fileID("d")) tree.deleteFile(fileID("d"))
return tree return tree
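For orientation, the fixture assembled above yields roughly this shape (suffix arguments shown in parentheses):

root
├── file "r"
├── folder(parentFolderSuffix)
│   ├── file "p"
│   └── folder()
│       └── file "f"
└── folder(tombstoneSuffix)  -> tombstoned after file "t" is added
plus a deleted file "d" recorded outside the tree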
@ -1355,22 +1359,24 @@ func (dd *deltaDrive) fileAt(
parentSuffix any, parentSuffix any,
fileSuffixes ...any, fileSuffixes ...any,
) models.DriveItemable { ) models.DriveItemable {
return driveItem( if parentSuffix == root {
fileID(fileSuffixes...), return driveItem(
fileName(fileSuffixes...), fileID(fileSuffixes...),
dd.dir(folderName(parentSuffix)), fileName(fileSuffixes...),
folderID(parentSuffix), dd.dir(),
isFile) rootID,
} isFile)
}
func (dd *deltaDrive) fileAtRoot(
fileSuffixes ...any,
) models.DriveItemable {
return driveItem( return driveItem(
fileID(fileSuffixes...), fileID(fileSuffixes...),
fileName(fileSuffixes...), fileName(fileSuffixes...),
// the file's parent directory isn't used;
// this parameter is an artifact of the driveItem
// api and doesn't need to be populated for test
// success.
dd.dir(), dd.dir(),
rootID, folderID(parentSuffix),
isFile) isFile)
} }
@ -1391,28 +1397,25 @@ func (dd *deltaDrive) fileWURLAtRoot(
return di return di
} }
func (dd *deltaDrive) fileWSizeAtRoot(
size int64,
fileSuffixes ...any,
) models.DriveItemable {
return driveItemWSize(
fileID(fileSuffixes...),
fileName(fileSuffixes...),
dd.dir(),
rootID,
size,
isFile)
}
func (dd *deltaDrive) fileWSizeAt( func (dd *deltaDrive) fileWSizeAt(
size int64, size int64,
parentSuffix any, parentSuffix any,
fileSuffixes ...any, fileSuffixes ...any,
) models.DriveItemable { ) models.DriveItemable {
if parentSuffix == root {
return driveItemWSize(
fileID(fileSuffixes...),
fileName(fileSuffixes...),
dd.dir(),
rootID,
size,
isFile)
}
return driveItemWSize( return driveItemWSize(
fileID(fileSuffixes...), fileID(fileSuffixes...),
fileName(fileSuffixes...), fileName(fileSuffixes...),
dd.dir(folderName(parentSuffix)), dd.dir(),
folderID(parentSuffix), folderID(parentSuffix),
size, size,
isFile) isFile)
@ -1442,9 +1445,9 @@ func driveFolder(
isFolder) isFolder)
} }
func driveRootFolder() models.DriveItemable { func rootFolder() models.DriveItemable {
rootFolder := models.NewDriveItem() rootFolder := models.NewDriveItem()
rootFolder.SetName(ptr.To(rootName)) rootFolder.SetName(ptr.To(root))
rootFolder.SetId(ptr.To(rootID)) rootFolder.SetId(ptr.To(rootID))
rootFolder.SetRoot(models.NewRoot()) rootFolder.SetRoot(models.NewRoot())
rootFolder.SetFolder(models.NewFolder()) rootFolder.SetFolder(models.NewFolder())
@ -1452,29 +1455,40 @@ func driveRootFolder() models.DriveItemable {
return rootFolder return rootFolder
} }
func (dd *deltaDrive) folderAtRoot(
folderSuffixes ...any,
) models.DriveItemable {
return driveItem(
folderID(folderSuffixes...),
folderName(folderSuffixes...),
dd.dir(),
rootID,
isFolder)
}
func (dd *deltaDrive) folderAt( func (dd *deltaDrive) folderAt(
parentSuffix any, parentSuffix any,
folderSuffixes ...any, folderSuffixes ...any,
) models.DriveItemable { ) models.DriveItemable {
if parentSuffix == root {
return driveItem(
folderID(folderSuffixes...),
folderName(folderSuffixes...),
dd.dir(),
rootID,
isFolder)
}
return driveItem( return driveItem(
folderID(folderSuffixes...), folderID(folderSuffixes...),
folderName(folderSuffixes...), folderName(folderSuffixes...),
// we should be putting in the full location here, not just the
// parent suffix. But that full location would be unused because
// our unit tests don't utilize folder subselection (which is the
// only reason we need to provide the dir).
dd.dir(folderName(parentSuffix)), dd.dir(folderName(parentSuffix)),
folderID(parentSuffix), folderID(parentSuffix),
isFolder) isFolder)
} }
func (dd *deltaDrive) packageAtRoot() models.DriveItemable {
return driveItem(
folderID(pkg),
folderName(pkg),
dd.dir(),
rootID,
isPackage)
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// id, name, path factories // id, name, path factories
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -1482,6 +1496,16 @@ func (dd *deltaDrive) folderAt(
// assumption is only one suffix per id. Mostly using // assumption is only one suffix per id. Mostly using
// the variadic as an "optional" extension. // the variadic as an "optional" extension.
func id(v string, suffixes ...any) string { func id(v string, suffixes ...any) string {
if len(suffixes) > 1 {
// this should fail any test. we could pass in a
// testing.T and fail the call here instead, but that
// produces a lot of chaff, while this check still
// gets us the expected failure.
return fmt.Sprintf(
"too many suffixes in the ID; should only be 0 or 1, got %d",
len(suffixes))
}
id := fmt.Sprintf("id_%s", v) id := fmt.Sprintf("id_%s", v)
// a bit weird, but acts as a quality of life // a bit weird, but acts as a quality of life
@ -1505,6 +1529,16 @@ func id(v string, suffixes ...any) string {
// assumption is only one suffix per name. Mostly using // assumption is only one suffix per name. Mostly using
// the variadic as an "optional" extension. // the variadic as an "optional" extension.
func name(v string, suffixes ...any) string { func name(v string, suffixes ...any) string {
if len(suffixes) > 1 {
// this should fail any test. we could pass in a
// testing.T and fail the call here instead, but that
// produces a lot of chaff, while this check still
// gets us the expected failure.
return fmt.Sprintf(
"too many suffixes in the Name; should only be 0 or 1, got %d",
len(suffixes))
}
name := fmt.Sprintf("n_%s", v) name := fmt.Sprintf("n_%s", v)
// a bit weird, but acts as a quality of life // a bit weird, but acts as a quality of life
@ -1542,20 +1576,19 @@ func toPath(elems ...string) string {
} }
// produces the full path for the provided drive // produces the full path for the provided drive
func (dd *deltaDrive) strPath(elems ...string) string { func (dd *deltaDrive) strPath(t *testing.T, elems ...string) string {
return toPath(append( return dd.fullPath(t, elems...).String()
[]string{
tenant,
path.OneDriveService.String(),
user,
path.FilesCategory.String(),
odConsts.DriveFolderPrefixBuilder(dd.id).String(),
},
elems...)...)
} }
func (dd *deltaDrive) fullPath(t *testing.T, elems ...string) path.Path { func (dd *deltaDrive) fullPath(t *testing.T, elems ...string) path.Path {
p, err := path.FromDataLayerPath(dd.strPath(elems...), false) p, err := odConsts.DriveFolderPrefixBuilder(dd.id).
Append(elems...).
ToDataLayerPath(
tenant,
user,
path.OneDriveService,
path.FilesCategory,
false)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
return p return p
@ -1564,9 +1597,9 @@ func (dd *deltaDrive) fullPath(t *testing.T, elems ...string) path.Path {
// produces a complete path prefix up to the drive root folder with any // produces a complete path prefix up to the drive root folder with any
// elements passed in appended to the generated prefix. // elements passed in appended to the generated prefix.
func (dd *deltaDrive) dir(elems ...string) string { func (dd *deltaDrive) dir(elems ...string) string {
return toPath(append( return odConsts.DriveFolderPrefixBuilder(dd.id).
[]string{odConsts.DriveFolderPrefixBuilder(dd.id).String()}, Append(elems...).
elems...)...) String()
} }
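This builder-based construction is the crux of the fix: each element is appended as its own segment, so a name containing a slash gets escaped rather than splitting the path. A rough standalone illustration of the difference (the escape rule below is a stand-in; the real escaping lives in the path package):

package main

import (
	"fmt"
	"strings"
)

// escapeElem mimics a path builder that escapes separators inside a single
// element before joining, keeping the element one path segment.
func escapeElem(elem string) string {
	return strings.ReplaceAll(elem, "/", `\/`)
}

func main() {
	elems := []string{"root:", "a/b", "c"}

	// naive join: the slash inside "a/b" leaks an extra separator.
	naive := strings.Join(elems, "/")

	escaped := make([]string, 0, len(elems))
	for _, e := range elems {
		escaped = append(escaped, escapeElem(e))
	}

	safe := strings.Join(escaped, "/")

	fmt.Println(naive) // root:/a/b/c  -> four segments
	fmt.Println(safe)  // root:/a\/b/c -> three segments
}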
// common item names // common item names
@ -1583,7 +1616,7 @@ const (
nav = "nav" nav = "nav"
pkg = "package" pkg = "package"
rootID = odConsts.RootID rootID = odConsts.RootID
rootName = odConsts.RootPathDir root = odConsts.RootPathDir
subfolder = "subfolder" subfolder = "subfolder"
tenant = "t" tenant = "t"
user = "u" user = "u"

View File

@ -6,7 +6,10 @@ import (
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control"
) )
var errHitLimit = clues.New("hit limiter limits") var (
errHitLimit = clues.New("hit limiter limits")
errHitCollectionLimit = clues.New("hit item limits within the current collection")
)
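A minimal sketch of how the two sentinels are meant to be handled (plain errors.New and a hypothetical addFolder helper stand in for clues and the real enumerator): a full collection only skips the current folder, while the global limit ends the walk.

package main

import (
	"errors"
	"fmt"
)

var (
	errHitLimit           = errors.New("hit limiter limits")
	errHitCollectionLimit = errors.New("hit item limits within the current collection")
)

// addFolder stands in for the per-folder enumeration step.
func addFolder(itemsInFolder, perFolderLimit, totalItems, totalLimit int) error {
	if totalItems >= totalLimit {
		return errHitLimit
	}

	if itemsInFolder >= perFolderLimit {
		return errHitCollectionLimit
	}

	return nil
}

func main() {
	folderSizes := []int{3, 1, 2}
	total := 0

	for i, size := range folderSizes {
		err := addFolder(size, 2, total, 1)

		// a full collection only skips this folder...
		if errors.Is(err, errHitCollectionLimit) {
			fmt.Println("folder", i, "is full; moving on")
			continue
		}

		// ...while the global limit ends the enumeration entirely.
		if errors.Is(err, errHitLimit) {
			fmt.Println("preview limits reached; stopping")
			break
		}

		total += size
	}
}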
type driveEnumerationStats struct { type driveEnumerationStats struct {
numPages int numPages int
@ -111,9 +114,22 @@ func (l pagerLimiter) hitItemLimit(itemCount int) bool {
return l.enabled() && itemCount >= l.limits.MaxItems return l.enabled() && itemCount >= l.limits.MaxItems
} }
// hitTotalBytesLimit returns true if the limiter is enabled and has reached the limit // alreadyHitTotalBytesLimit returns true if the limiter is enabled and has reached the limit
// for the accumulated byte size of all items (the file contents, not the item metadata) // for the accumulated byte size of all items (the file contents, not the item metadata)
// added to collections for this backup. // added to collections for this backup.
func (l pagerLimiter) hitTotalBytesLimit(i int64) bool { func (l pagerLimiter) alreadyHitTotalBytesLimit(i int64) bool {
return l.enabled() && i >= l.limits.MaxBytes return l.enabled() && i > l.limits.MaxBytes
}
// willStepOverBytesLimit returns true if the limiter is enabled and adding the provided
// bytes to the current total would step past the limit plus some padding (the padding
// ensures we can always reach the limit itself).
func (l pagerLimiter) willStepOverBytesLimit(current, addition int64) bool {
if !l.enabled() {
return false
}
limitPlusPadding := int64(float64(l.limits.MaxBytes) * 1.03)
return (current + addition) > limitPlusPadding
} }
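A quick standalone check of the padding arithmetic above, using made-up numbers: an overshoot within 3% of the cap is tolerated so the final file can carry the total across the limit, while anything beyond the padded ceiling is refused.

package main

import "fmt"

// willStepOver mirrors the padded-limit check above, with the limit
// passed in directly instead of read from a pagerLimiter.
func willStepOver(maxBytes, current, addition int64) bool {
	limitPlusPadding := int64(float64(maxBytes) * 1.03)
	return (current + addition) > limitPlusPadding
}

func main() {
	const limit = 1000
	fmt.Println(willStepOver(limit, 990, 20)) // false: 1010 fits inside the 1030 ceiling
	fmt.Println(willStepOver(limit, 990, 50)) // true: 1040 overshoots the padding
}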

View File

@ -34,9 +34,14 @@ type backupLimitTest struct {
// Collection name -> set of item IDs. We can't check item data because // Collection name -> set of item IDs. We can't check item data because
// that's not mocked out. Metadata is checked separately. // that's not mocked out. Metadata is checked separately.
expectedItemIDsInCollection map[string][]string expectedItemIDsInCollection map[string][]string
// Collection name -> set of item IDs. We can't check item data because
// that's not mocked out. Metadata is checked separately.
// the tree version has somewhat different (more accurate)
// expectations for success.
expectedItemIDsInCollectionTree map[string][]string
} }
func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest { func backupLimitTable(t *testing.T, d1, d2 *deltaDrive) []backupLimitTest {
return []backupLimitTest{ return []backupLimitTest{
{ {
name: "OneDrive SinglePage ExcludeItemsOverMaxSize", name: "OneDrive SinglePage ExcludeItemsOverMaxSize",
@ -50,12 +55,13 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
}, },
enumerator: driveEnumerator( enumerator: driveEnumerator(
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage( delta(id(deltaURL), nil).with(
d1.fileWSizeAtRoot(7, "f1"), aPage(
d1.fileWSizeAtRoot(1, "f2"), d1.fileWSizeAt(7, root, "f1"),
d1.fileWSizeAtRoot(1, "f3"))))), d1.fileWSizeAt(1, root, "f2"),
d1.fileWSizeAt(1, root, "f3"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f2"), fileID("f3")}, d1.strPath(t): {fileID("f2"), fileID("f3")},
}, },
}, },
{ {
@ -70,12 +76,13 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
}, },
enumerator: driveEnumerator( enumerator: driveEnumerator(
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage( delta(id(deltaURL), nil).with(
d1.fileWSizeAtRoot(1, "f1"), aPage(
d1.fileWSizeAtRoot(2, "f2"), d1.fileWSizeAt(1, root, "f1"),
d1.fileWSizeAtRoot(1, "f3"))))), d1.fileWSizeAt(2, root, "f2"),
d1.fileWSizeAt(1, root, "f3"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")}, d1.strPath(t): {fileID("f1"), fileID("f2")},
}, },
}, },
{ {
@ -90,14 +97,15 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
}, },
enumerator: driveEnumerator( enumerator: driveEnumerator(
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage( delta(id(deltaURL), nil).with(
d1.fileWSizeAtRoot(1, "f1"), aPage(
d1.folderAtRoot(), d1.fileWSizeAt(1, root, "f1"),
d1.fileWSizeAt(2, folder, "f2"), d1.folderAt(root),
d1.fileWSizeAt(1, folder, "f3"))))), d1.fileWSizeAt(2, folder, "f2"),
d1.fileWSizeAt(1, folder, "f3"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1")}, d1.strPath(t): {fileID("f1")},
d1.strPath(folderName()): {folderID(), fileID("f2")}, d1.strPath(t, folderName()): {folderID(), fileID("f2")},
}, },
}, },
{ {
@ -112,15 +120,16 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
}, },
enumerator: driveEnumerator( enumerator: driveEnumerator(
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage( delta(id(deltaURL), nil).with(
d1.fileAtRoot("f1"), aPage(
d1.fileAtRoot("f2"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f3"), d1.fileAt(root, "f2"),
d1.fileAtRoot("f4"), d1.fileAt(root, "f3"),
d1.fileAtRoot("f5"), d1.fileAt(root, "f4"),
d1.fileAtRoot("f6"))))), d1.fileAt(root, "f5"),
d1.fileAt(root, "f6"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")}, d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
}, },
}, },
{ {
@ -137,19 +146,19 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2")), d1.fileAt(root, "f2")),
aPage( aPage(
// Repeated items shouldn't count against the limit. // Repeated items shouldn't count against the limit.
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f3"), d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4"), d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"), d1.fileAt(folder, "f5"),
d1.fileAt(folder, "f6"))))), d1.fileAt(folder, "f6"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")}, d1.strPath(t): {fileID("f1"), fileID("f2")},
d1.strPath(folderName()): {folderID(), fileID("f3")}, d1.strPath(t, folderName()): {folderID(), fileID("f3")},
}, },
}, },
{ {
@ -166,16 +175,16 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2")), d1.fileAt(root, "f2")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f3"), d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4"), d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"), d1.fileAt(folder, "f5"),
d1.fileAt(folder, "f6"))))), d1.fileAt(folder, "f6"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")}, d1.strPath(t): {fileID("f1"), fileID("f2")},
}, },
}, },
{ {
@ -192,18 +201,22 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2"), d1.fileAt(root, "f2"),
d1.fileAtRoot("f3")), d1.fileAt(root, "f3")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f4"), d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"))))), d1.fileAt(folder, "f5"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
// Root has an additional item. It's hard to fix that in the code // Root has an additional item. It's hard to fix that in the code though.
// though. d1.strPath(t): {fileID("f1"), fileID("f2")},
d1.strPath(): {fileID("f1"), fileID("f2")}, d1.strPath(t, folderName()): {folderID(), fileID("f4")},
d1.strPath(folderName()): {folderID(), fileID("f4")}, },
expectedItemIDsInCollectionTree: map[string][]string{
// the tree version doesn't have this problem.
d1.strPath(t): {fileID("f1")},
d1.strPath(t, folderName()): {folderID(), fileID("f4")},
}, },
}, },
{ {
@ -220,18 +233,18 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f1"), d1.fileAt(folder, "f1"),
d1.fileAt(folder, "f2")), d1.fileAt(folder, "f2")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
// Updated item that shouldn't count against the limit a second time. // Updated item that shouldn't count against the limit a second time.
d1.fileAt(folder, "f2"), d1.fileAt(folder, "f2"),
d1.fileAt(folder, "f3"), d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4"))))), d1.fileAt(folder, "f4"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {}, d1.strPath(t): {},
d1.strPath(folderName()): {folderID(), fileID("f1"), fileID("f2"), fileID("f3")}, d1.strPath(t, folderName()): {folderID(), fileID("f1"), fileID("f2"), fileID("f3")},
}, },
}, },
{ {
@ -248,19 +261,26 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2"), d1.fileAt(root, "f2"),
// Put folder 0 at limit. // Put root/folder at limit.
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f3"), d1.fileAt(folder, "f3"),
d1.fileAt(folder, "f4")), d1.fileAt(folder, "f4")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
// Try to move item from root to folder 0 which is already at the limit. // Try to move item from root to folder 0 which is already at the limit.
d1.fileAt(folder, "f1"))))), d1.fileAt(folder, "f1"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2")}, d1.strPath(t): {fileID("f1"), fileID("f2")},
d1.strPath(folderName()): {folderID(), fileID("f3"), fileID("f4")}, d1.strPath(t, folderName()): {folderID(), fileID("f3"), fileID("f4")},
},
expectedItemIDsInCollectionTree: map[string][]string{
d1.strPath(t): {fileID("f2")},
// note that the tree version allows f1 to get moved.
// we've already committed to backing up the file as part of the preview,
// so it doesn't seem rational to prevent its movement.
d1.strPath(t, folderName()): {folderID(), fileID("f3"), fileID("f4"), fileID("f1")},
}, },
}, },
{ {
@ -277,18 +297,18 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2"), d1.fileAt(root, "f2"),
d1.fileAtRoot("f3")), d1.fileAt(root, "f3")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f4")), d1.fileAt(folder, "f4")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f5"))))), d1.fileAt(folder, "f5"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")}, d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")}, d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
}, },
}, },
{ {
@ -305,21 +325,21 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2"), d1.fileAt(root, "f2"),
d1.fileAtRoot("f3")), d1.fileAt(root, "f3")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f4"), d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5"), d1.fileAt(folder, "f5"),
// This container shouldn't be returned. // This container shouldn't be returned.
d1.folderAtRoot(2), d1.folderAt(root, 2),
d1.fileAt(2, "f7"), d1.fileAt(2, "f7"),
d1.fileAt(2, "f8"), d1.fileAt(2, "f8"),
d1.fileAt(2, "f9"))))), d1.fileAt(2, "f9"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")}, d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")}, d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
}, },
}, },
{ {
@ -336,22 +356,22 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2"), d1.fileAt(root, "f2"),
d1.fileAtRoot("f3")), d1.fileAt(root, "f3")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f4"), d1.fileAt(folder, "f4"),
d1.fileAt(folder, "f5")), d1.fileAt(folder, "f5")),
aPage( aPage(
// This container shouldn't be returned. // This container shouldn't be returned.
d1.folderAtRoot(2), d1.folderAt(root, 2),
d1.fileAt(2, "f7"), d1.fileAt(2, "f7"),
d1.fileAt(2, "f8"), d1.fileAt(2, "f8"),
d1.fileAt(2, "f9"))))), d1.fileAt(2, "f9"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")}, d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")}, d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
}, },
}, },
{ {
@ -366,22 +386,24 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
}, },
enumerator: driveEnumerator( enumerator: driveEnumerator(
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with(aPage( delta(id(deltaURL), nil).with(
d1.fileAtRoot("f1"), aPage(
d1.fileAtRoot("f2"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f3"), d1.fileAt(root, "f2"),
d1.fileAtRoot("f4"), d1.fileAt(root, "f3"),
d1.fileAtRoot("f5")))), d1.fileAt(root, "f4"),
d1.fileAt(root, "f5")))),
d2.newEnumer().with( d2.newEnumer().with(
delta(id(deltaURL), nil).with(aPage( delta(id(deltaURL), nil).with(
d2.fileAtRoot("f1"), aPage(
d2.fileAtRoot("f2"), d2.fileAt(root, "f1"),
d2.fileAtRoot("f3"), d2.fileAt(root, "f2"),
d2.fileAtRoot("f4"), d2.fileAt(root, "f3"),
d2.fileAtRoot("f5"))))), d2.fileAt(root, "f4"),
d2.fileAt(root, "f5"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")}, d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d2.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")}, d2.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
}, },
}, },
{ {
@ -397,18 +419,18 @@ func backupLimitTable(d1, d2 *deltaDrive) []backupLimitTest {
d1.newEnumer().with( d1.newEnumer().with(
delta(id(deltaURL), nil).with( delta(id(deltaURL), nil).with(
aPage( aPage(
d1.fileAtRoot("f1"), d1.fileAt(root, "f1"),
d1.fileAtRoot("f2"), d1.fileAt(root, "f2"),
d1.fileAtRoot("f3")), d1.fileAt(root, "f3")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f4")), d1.fileAt(folder, "f4")),
aPage( aPage(
d1.folderAtRoot(), d1.folderAt(root),
d1.fileAt(folder, "f5"))))), d1.fileAt(folder, "f5"))))),
expectedItemIDsInCollection: map[string][]string{ expectedItemIDsInCollection: map[string][]string{
d1.strPath(): {fileID("f1"), fileID("f2"), fileID("f3")}, d1.strPath(t): {fileID("f1"), fileID("f2"), fileID("f3")},
d1.strPath(folderName()): {folderID(), fileID("f4"), fileID("f5")}, d1.strPath(t, folderName()): {folderID(), fileID("f4"), fileID("f5")},
}, },
}, },
} }
@ -427,8 +449,6 @@ func (suite *LimiterUnitSuite) TestGet_PreviewLimits_noTree() {
// checks that don't examine metadata, collection states, etc. They really just // checks that don't examine metadata, collection states, etc. They really just
// check the expected items appear. // check the expected items appear.
func (suite *LimiterUnitSuite) TestGet_PreviewLimits_tree() { func (suite *LimiterUnitSuite) TestGet_PreviewLimits_tree() {
suite.T().Skip("TODO: unskip when tree produces collections")
opts := control.DefaultOptions() opts := control.DefaultOptions()
opts.ToggleFeatures.UseDeltaTree = true opts.ToggleFeatures.UseDeltaTree = true
@ -441,7 +461,7 @@ func iterGetPreviewLimitsTests(
) { ) {
d1, d2 := drive(), drive(2) d1, d2 := drive(), drive(2)
for _, test := range backupLimitTable(d1, d2) { for _, test := range backupLimitTable(suite.T(), d1, d2) {
suite.Run(test.name, func() { suite.Run(test.name, func() {
runGetPreviewLimits( runGetPreviewLimits(
suite.T(), suite.T(),
@ -521,9 +541,15 @@ func runGetPreviewLimits(
itemIDs = append(itemIDs, id) itemIDs = append(itemIDs, id)
} }
expectItemIDs := test.expectedItemIDsInCollection[folderPath]
if opts.ToggleFeatures.UseDeltaTree && test.expectedItemIDsInCollectionTree != nil {
expectItemIDs = test.expectedItemIDsInCollectionTree[folderPath]
}
assert.ElementsMatchf( assert.ElementsMatchf(
t, t,
test.expectedItemIDsInCollection[folderPath], expectItemIDs,
itemIDs, itemIDs,
"item IDs in collection with path:\n\t%q", "item IDs in collection with path:\n\t%q",
folderPath) folderPath)
@ -542,6 +568,9 @@ type defaultLimitTestExpects struct {
numItems int numItems int
numContainers int numContainers int
numItemsPerContainer int numItemsPerContainer int
// the tree handling behavior may deviate under certain conditions
// since it allows one file to slightly step over the byte limit
numItemsTreePadding int
} }
type defaultLimitTest struct { type defaultLimitTest struct {
@ -641,6 +670,7 @@ func defaultLimitsTable() []defaultLimitTest {
numItems: int(defaultPreviewMaxBytes) / 1024 / 1024, numItems: int(defaultPreviewMaxBytes) / 1024 / 1024,
numContainers: 1, numContainers: 1,
numItemsPerContainer: int(defaultPreviewMaxBytes) / 1024 / 1024, numItemsPerContainer: int(defaultPreviewMaxBytes) / 1024 / 1024,
numItemsTreePadding: 1,
}, },
}, },
} }
@ -666,8 +696,6 @@ func (suite *LimiterUnitSuite) TestGet_PreviewLimits_defaultsNoTree() {
// These tests run a reduced set of checks that really just look for item counts // These tests run a reduced set of checks that really just look for item counts
// and such. Other tests are expected to provide more comprehensive checks. // and such. Other tests are expected to provide more comprehensive checks.
func (suite *LimiterUnitSuite) TestGet_PreviewLimits_defaultsWithTree() { func (suite *LimiterUnitSuite) TestGet_PreviewLimits_defaultsWithTree() {
suite.T().Skip("TODO: unskip when tree produces collections")
opts := control.DefaultOptions() opts := control.DefaultOptions()
opts.ToggleFeatures.UseDeltaTree = true opts.ToggleFeatures.UseDeltaTree = true
@ -714,7 +742,7 @@ func runGetPreviewLimitsDefaults(
for containerIdx := 0; containerIdx < test.numContainers; containerIdx++ { for containerIdx := 0; containerIdx < test.numContainers; containerIdx++ {
page := nextPage{ page := nextPage{
Items: []models.DriveItemable{ Items: []models.DriveItemable{
driveRootFolder(), rootFolder(),
driveItem( driveItem(
folderID(containerIdx), folderID(containerIdx),
folderName(containerIdx), folderName(containerIdx),
@ -798,11 +826,16 @@ func runGetPreviewLimitsDefaults(
numItems += len(col.driveItems) numItems += len(col.driveItems)
// Add one to account for the folder permissions item. // Add one to account for the folder permissions item.
expected := test.expect.numItemsPerContainer + 1
if opts.ToggleFeatures.UseDeltaTree {
expected += test.expect.numItemsTreePadding
}
assert.Len( assert.Len(
t, t,
col.driveItems, col.driveItems,
test.expect.numItemsPerContainer+1, expected,
"items in container %v", "number of items in collection at:\n\t%+v",
col.FullPath()) col.FullPath())
} }
@ -810,12 +843,18 @@ func runGetPreviewLimitsDefaults(
t, t,
test.expect.numContainers, test.expect.numContainers,
numContainers, numContainers,
"total containers") "total count of collections")
// Add one per container to account for each folder permissions item.
expected := test.expect.numItems + test.expect.numContainers
if opts.ToggleFeatures.UseDeltaTree {
expected += test.expect.numItemsTreePadding
}
// Each container also gets an item so account for that here. // Each container also gets an item so account for that here.
assert.Equal( assert.Equal(
t, t,
test.expect.numItems+test.expect.numContainers, expected,
numItems, numItems,
"total items across all containers") "total sum of item counts in all collections")
} }

View File

@ -509,7 +509,7 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
pages: []nextPage{ pages: []nextPage{
aPage( aPage(
d.fileWURLAtRoot(aURL(1), false, 1), d.fileWURLAtRoot(aURL(1), false, 1),
d.folderAtRoot(2)), d.folderAt(root, 2)),
}, },
expectedItemProps: map[string]itemProps{ expectedItemProps: map[string]itemProps{
fileID(2): {}, fileID(2): {},

View File

@ -33,6 +33,15 @@ type DriveItem struct {
additionalData map[string]any additionalData map[string]any
} }
func NewDriveItem(
id, name string,
) *DriveItem {
return &DriveItem{
id: ptr.To(id),
name: ptr.To(name),
}
}
// Disable revive linter since we want to follow naming scheme used by graph SDK here. // Disable revive linter since we want to follow naming scheme used by graph SDK here.
// nolint: revive // nolint: revive
func (c *DriveItem) GetId() *string { func (c *DriveItem) GetId() *string {