add new funcs and tidy up drive limiter (#4734)
Adds new functions to the drive limiter that will be used specifically in the tree-based backup process. Also updates the limiter tests to have separate versions for the tree and non-tree variations. In this PR the tree variation will definitely fail; the next PR will focus on the backup-process corrections needed to make limit handling correct according to the existing tests.

---

#### Does this PR need a docs update or release note?

- [x] ⛔ No

#### Type of change

- [x] 🌻 Feature

#### Issue(s)

* #4689

#### Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
This commit is contained in:
parent b5c9199695
commit 047d46ea53
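The diffs below swap the tree walk's limit checks from the stats-based `at*` helpers to a new count-based `hit*` family on `pagerLimiter`. As orientation before reading the hunks, here is a minimal, self-contained sketch of that family's shape and boundary semantics; the `limits` struct and its fields are simplified stand-ins for corso's `control` types, not the real API:

```go
package main

import "fmt"

// Simplified stand-in for the limiter in this PR; only the fields the
// sketch touches are included.
type limits struct {
	Enabled       bool
	MaxContainers int
	MaxItems      int
	MaxPages      int
	MaxBytes      int64
}

type pagerLimiter struct{ limits limits }

func (l pagerLimiter) enabled() bool { return l.limits.Enabled }

// The tree-oriented checks take plain counts (derived from the tree or a
// count.Bus) instead of a *driveEnumerationStats.
func (l pagerLimiter) hitPageLimit(n int) bool         { return l.enabled() && n >= l.limits.MaxPages }
func (l pagerLimiter) hitContainerLimit(n int) bool    { return l.enabled() && n >= l.limits.MaxContainers }
func (l pagerLimiter) hitItemLimit(n int) bool         { return l.enabled() && n >= l.limits.MaxItems }
func (l pagerLimiter) hitTotalBytesLimit(b int64) bool { return l.enabled() && b >= l.limits.MaxBytes }

func main() {
	l := pagerLimiter{limits: limits{Enabled: true, MaxItems: 3, MaxBytes: 1 << 30}}

	fmt.Println(l.hitItemLimit(2))              // false: still below the cap
	fmt.Println(l.hitItemLimit(3))              // true: being at the cap counts as a hit
	fmt.Println(l.hitTotalBytesLimit(25 << 30)) // true: far over a 1gb budget

	// A disabled limiter never reports a hit, regardless of counts.
	fmt.Println(pagerLimiter{}.hitItemLimit(1_000_000)) // false
}
```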
```diff
@@ -29,6 +29,8 @@ import (
+	"github.com/alcionai/corso/src/pkg/services/m365/api/pagers"
 )

+var errGetTreeNotImplemented = clues.New("forced error: cannot run tree-based backup: incomplete implementation")

 const (
 	restrictedDirectory = "Site Pages"
@@ -292,14 +294,14 @@ func (c *Collections) Get(
 	errs *fault.Bus,
 ) ([]data.BackupCollection, bool, error) {
 	if c.ctrl.ToggleFeatures.UseDeltaTree {
-		_, _, err := c.getTree(ctx, prevMetadata, ssmb, errs)
+		colls, canUsePrevBackup, err := c.getTree(ctx, prevMetadata, ssmb, errs)
 		if err != nil {
 			return nil, false, clues.Wrap(err, "processing backup using tree")
 		}

-		return nil,
-			false,
-			clues.New("forced error: cannot run tree-based backup: incomplete implementation")
+		return colls,
+			canUsePrevBackup,
+			errGetTreeNotImplemented
 	}

 	deltasByDriveID, prevPathsByDriveID, canUsePrevBackup, err := deserializeAndValidateMetadata(
@@ -856,7 +858,7 @@ func (c *Collections) PopulateDriveCollections(
 			// Don't check for containers we've already seen.
 			if _, ok := c.CollectionMap[driveID][id]; !ok {
 				if id != lastContainerID {
-					if limiter.atLimit(stats, ignoreMe) {
+					if limiter.atLimit(stats) {
 						break
 					}
```
File diff suppressed because it is too large
```diff
@@ -156,8 +156,6 @@ func (c *Collections) getTree(
 	return collections, canUsePrevBackup, nil
 }

-var errTreeNotImplemented = clues.New("backup tree not implemented")
-
 func (c *Collections) makeDriveCollections(
 	ctx context.Context,
 	drv models.Driveable,
@@ -172,10 +170,7 @@ func (c *Collections) makeDriveCollections(
 		return nil, nil, pagers.DeltaUpdate{}, clues.Wrap(err, "generating backup tree prefix")
 	}

-	var (
-		tree  = newFolderyMcFolderFace(ppfx)
-		stats = &driveEnumerationStats{}
-	)
+	tree := newFolderyMcFolderFace(ppfx)

 	counter.Add(count.PrevPaths, int64(len(prevPaths)))

@@ -184,10 +179,9 @@ func (c *Collections) makeDriveCollections(
 	du, err := c.populateTree(
 		ctx,
 		tree,
-		stats,
+		limiter,
 		drv,
 		prevDeltaLink,
-		limiter,
 		counter,
 		errs)
 	if err != nil {
@@ -260,7 +254,7 @@ func (c *Collections) makeDriveCollections(
 		return nil, nil, du, nil
 	}

-	return nil, nil, du, errTreeNotImplemented
+	return nil, nil, du, errGetTreeNotImplemented
 }

 // populateTree constructs a new tree and populates it with items
@@ -268,10 +262,9 @@ func (c *Collections) makeDriveCollections(
 func (c *Collections) populateTree(
 	ctx context.Context,
 	tree *folderyMcFolderFace,
-	stats *driveEnumerationStats,
+	limiter *pagerLimiter,
 	drv models.Driveable,
 	prevDeltaLink string,
-	limiter *pagerLimiter,
 	counter *count.Bus,
 	errs *fault.Bus,
 ) (pagers.DeltaUpdate, error) {
@@ -297,23 +290,18 @@ func (c *Collections) populateTree(
 			break
 		}

-		counter.Inc(count.PagesEnumerated)
-
 		if reset {
 			counter.Inc(count.PagerResets)
 			tree.reset()
-
-			*stats = driveEnumerationStats{}
+			c.resetStats()
 		}

 		err := c.enumeratePageOfItems(
 			ctx,
 			tree,
-			stats,
+			limiter,
 			drv,
 			page,
-			limiter,
 			counter,
 			errs)
 		if err != nil {
@@ -324,17 +312,12 @@ func (c *Collections) populateTree(
 			el.AddRecoverable(ctx, clues.Stack(err))
 		}

-		// Stop enumeration early if we've reached the item or page limit. Do this
-		// at the end of the loop so we don't request another page in the
-		// background.
-		//
-		// We don't want to break on just the container limit here because it's
-		// possible that there's more items in the current (final) container that
-		// we're processing. We need to see the next page to determine if we've
-		// reached the end of the container. Note that this doesn't take into
-		// account the number of items in the current container, so it's possible it
-		// will fetch more data when it doesn't really need to.
-		if limiter.atPageLimit(stats) || limiter.atItemLimit(stats) {
+		counter.Inc(count.PagesEnumerated)
+
+		// Stop enumeration early if we've reached the page limit. Keep this
+		// at the end of the loop so we don't request another page (pager.NextPage)
+		// before seeing we've passed the limit.
+		if limiter.hitPageLimit(int(counter.Get(count.PagesEnumerated))) {
 			break
 		}
 	}
```
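The page count now lives on the shared `count.Bus` and is incremented at the bottom of the loop, immediately before the `hitPageLimit` check, so enumeration stops before the pager is asked for another page. A tiny runnable sketch of that control flow, with the pager reduced to a slice; all names here are illustrative, not corso's:

```go
package main

import "fmt"

func main() {
	const maxPages = 2

	// Stand-in for the delta pager: each element is one page of items.
	pages := [][]string{{"a", "b"}, {"c"}, {"d", "e"}}

	pagesEnumerated := 0

	for _, page := range pages {
		fmt.Println("processing page:", page)

		// Count the page at the bottom of the loop, then check the limit,
		// so we never request a page we aren't going to process.
		pagesEnumerated++
		if pagesEnumerated >= maxPages {
			break
		}
	}

	fmt.Println("pages enumerated:", pagesEnumerated) // 2; the third page is never fetched
}
```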
```diff
@@ -357,10 +340,9 @@ func (c *Collections) populateTree(
 func (c *Collections) enumeratePageOfItems(
 	ctx context.Context,
 	tree *folderyMcFolderFace,
-	stats *driveEnumerationStats,
+	limiter *pagerLimiter,
 	drv models.Driveable,
 	page []models.DriveItemable,
-	limiter *pagerLimiter,
 	counter *count.Bus,
 	errs *fault.Bus,
 ) error {
@@ -390,14 +372,9 @@ func (c *Collections) enumeratePageOfItems(

 		switch {
 		case isFolder:
-			// check limits before adding the next new folder
-			if !tree.containsFolder(itemID) && limiter.atLimit(stats, len(tree.folderIDToNode)) {
-				return errHitLimit
-			}
-
-			skipped, err = c.addFolderToTree(ictx, tree, drv, item, stats, counter)
+			skipped, err = c.addFolderToTree(ictx, tree, drv, item, limiter, counter)
 		case isFile:
-			skipped, err = c.addFileToTree(ictx, tree, drv, item, limiter, stats, counter)
+			skipped, err = c.addFileToTree(ictx, tree, drv, item, limiter, counter)
 		default:
 			err = clues.NewWC(ictx, "item is neither folder nor file").
 				Label(fault.LabelForceNoBackupCreation, count.UnknownItemType)
@@ -408,22 +385,14 @@ func (c *Collections) enumeratePageOfItems(
 		}

 		if err != nil {
-			el.AddRecoverable(ictx, clues.Wrap(err, "adding item"))
-		}
-
-		// Check if we reached the item or size limit while processing this page.
-		// The check after this loop will get us out of the pager.
-		// We don't want to check all limits because it's possible we've reached
-		// the container limit but haven't reached the item limit or really added
-		// items to the last container we found.
-		// FIXME(keepers): this isn't getting handled properly at the moment
-		if limiter.atItemLimit(stats) {
-			return errHitLimit
+			if errors.Is(err, errHitLimit) {
+				return err
+			}
+
+			el.AddRecoverable(ictx, clues.Wrap(err, "adding folder"))
 		}
 	}

-	stats.numPages++
-
 	return clues.Stack(el.Failure()).OrNil()
 }
@@ -432,7 +401,7 @@ func (c *Collections) addFolderToTree(
 	tree *folderyMcFolderFace,
 	drv models.Driveable,
 	folder models.DriveItemable,
-	stats *driveEnumerationStats,
+	limiter *pagerLimiter,
 	counter *count.Bus,
 ) (*fault.Skipped, error) {
 	var (
@@ -447,6 +416,11 @@ func (c *Collections) addFolderToTree(
 		notSelected bool
 	)

+	// check container limits before adding the next new folder
+	if !tree.containsFolder(folderID) && limiter.hitContainerLimit(tree.countLiveFolders()) {
+		return nil, errHitLimit
+	}
+
 	if parent != nil {
 		parentID = ptr.Val(parent.GetId())
 	}
```
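One subtlety in the guard above: only folders that are not yet in the tree can trip the container cap, so moves and renames of known folders still flow through at the limit. A small runnable sketch of that admission rule under assumed data; the map and helper below are illustrative, not corso types:

```go
package main

import "fmt"

func main() {
	const maxContainers = 2

	// Stand-in for the tree's live-folder index, already at the cap.
	liveFolders := map[string]bool{"root": true, "docs": true}

	// Mirrors the guard added to addFolderToTree: reject only *new*
	// folders once the cap is reached.
	admit := func(folderID string) bool {
		isNew := !liveFolders[folderID]
		atCap := len(liveFolders) >= maxContainers

		return !(isNew && atCap)
	}

	fmt.Println(admit("docs"))   // true: known folder, so a rename/move still applies
	fmt.Println(admit("photos")) // false: a third container would exceed the cap
}
```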
```diff
@@ -541,18 +515,18 @@ func (c *Collections) addFileToTree(
 	drv models.Driveable,
 	file models.DriveItemable,
 	limiter *pagerLimiter,
-	stats *driveEnumerationStats,
 	counter *count.Bus,
 ) (*fault.Skipped, error) {
 	var (
-		driveID   = ptr.Val(drv.GetId())
-		fileID    = ptr.Val(file.GetId())
-		fileName  = ptr.Val(file.GetName())
-		fileSize  = ptr.Val(file.GetSize())
-		isDeleted = file.GetDeleted() != nil
-		isMalware = file.GetMalware() != nil
-		parent    = file.GetParentReference()
-		parentID  string
+		driveID      = ptr.Val(drv.GetId())
+		fileID       = ptr.Val(file.GetId())
+		fileName     = ptr.Val(file.GetName())
+		fileSize     = ptr.Val(file.GetSize())
+		lastModified = ptr.Val(file.GetLastModifiedDateTime())
+		isDeleted    = file.GetDeleted() != nil
+		isMalware    = file.GetMalware() != nil
+		parent       = file.GetParentReference()
+		parentID     string
 	)

 	if parent != nil {
@@ -583,53 +557,37 @@ func (c *Collections) addFileToTree(
 		return skip, nil
 	}

-	_, alreadySeen := tree.fileIDToParentID[fileID]
-
 	if isDeleted {
 		tree.deleteFile(fileID)
 		return nil, nil
 	}

-	if alreadySeen {
-		stats.numAddedFiles--
-		// FIXME(keepers): this might be faulty,
-		// since deletes may not include the file size.
-		// it will likely need to be tracked in
-		// the tree alongside the file modtime.
-		stats.numBytes -= fileSize
-	} else {
-		c.NumItems++
-		c.NumFiles++
-	}
-
-	parentNode, ok := tree.folderIDToNode[parentID]
-
-	// Don't add new items if the new collection is already reached it's limit.
-	// item moves and updates are generally allowed through.
-	if ok && !alreadySeen && limiter.atContainerItemsLimit(len(parentNode.files)) {
-		return nil, nil
-	}
-
-	// Skip large files that don't fit within the size limit.
-	if limiter.aboveSizeLimit(fileSize + stats.numBytes) {
-		return nil, nil
-	}
-
-	err := tree.addFile(parentID, fileID, ptr.Val(file.GetLastModifiedDateTime()))
+	_, alreadySeen := tree.fileIDToParentID[fileID]
+	parentNode, parentNotNil := tree.folderIDToNode[parentID]
+
+	if parentNotNil && !alreadySeen {
+		countSize := tree.countLiveFilesAndSizes()
+
+		// Don't add new items if the new collection has already reached its limit.
+		// item moves and updates are generally allowed through.
+		if limiter.atContainerItemsLimit(len(parentNode.files)) || limiter.hitItemLimit(countSize.numFiles) {
+			return nil, errHitLimit
+		}
+
+		// Skip large files that don't fit within the size limit.
+		// unlike the other checks, which see if we're already at the limit, this check
+		// needs to be forward-facing to ensure we don't go far over the limit.
+		// Example case: a 1gb limit and a 25gb file.
+		if limiter.hitTotalBytesLimit(fileSize + countSize.totalBytes) {
+			return nil, errHitLimit
+		}
+	}
+
+	err := tree.addFile(parentID, fileID, lastModified, fileSize)
 	if err != nil {
 		return nil, clues.StackWC(ctx, err)
 	}

-	stats.numAddedFiles++
-	stats.numBytes += fileSize
+	// Only increment counters for new files
+	if !alreadySeen {
+		// todo: remove c.NumItems/Files in favor of counter and tree counting.
+		c.NumItems++
+		c.NumFiles++
+	}

 	return nil, nil
 }
```
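The total-bytes check above is deliberately forward-facing: it adds the candidate file's size to the current tree total before comparing, so a single oversized file can't sail far past the budget (the 1gb limit vs 25gb file case called out in the comment). A quick runnable check of that arithmetic under assumed sizes:

```go
package main

import "fmt"

func main() {
	const maxBytes int64 = 1 << 30 // assumed 1gb budget

	var (
		treeTotal int64 = 512 << 20 // 512mb already accumulated in the tree
		smallFile int64 = 100 << 20 // 100mb candidate
		hugeFile  int64 = 25 << 30  // the 25gb example from the comment
	)

	// Mirrors hitTotalBytesLimit(fileSize + countSize.totalBytes).
	wouldHit := func(fileSize int64) bool {
		return fileSize+treeTotal >= maxBytes
	}

	fmt.Println(wouldHit(smallFile)) // false: 612mb still fits the budget
	fmt.Println(wouldHit(hugeFile))  // true: skipped before it lands in the tree
}
```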
File diff suppressed because it is too large
```diff
@@ -80,8 +80,8 @@ type nodeyMcNodeFace struct {
 	prev path.Elements
 	// folderID -> node
 	children map[string]*nodeyMcNodeFace
-	// file item ID -> last modified time
-	files map[string]time.Time
+	// file item ID -> file metadata
+	files map[string]fileyMcFileFace
 	// for special handling protocols around packages
 	isPackage bool
 }
@@ -96,11 +96,16 @@ func newNodeyMcNodeFace(
 		id:        id,
 		name:      name,
 		children:  map[string]*nodeyMcNodeFace{},
-		files:     map[string]time.Time{},
+		files:     map[string]fileyMcFileFace{},
 		isPackage: isPackage,
 	}
 }

+type fileyMcFileFace struct {
+	lastModified time.Time
+	contentSize  int64
+}
+
 // ---------------------------------------------------------------------------
 // folder handling
 // ---------------------------------------------------------------------------
@@ -114,10 +119,10 @@ func (face *folderyMcFolderFace) containsFolder(id string) bool {
 	return stillKicking || alreadyBuried
 }

-// CountNodes returns a count that is the sum of live folders and
-// tombstones recorded in the tree.
-func (face *folderyMcFolderFace) countFolders() int {
-	return len(face.tombstones) + len(face.folderIDToNode)
+// countLiveFolders returns a count of the number of folders held in the tree.
+// Tombstones are not included in the count. Only live folders.
+func (face *folderyMcFolderFace) countLiveFolders() int {
+	return len(face.folderIDToNode)
 }

 func (face *folderyMcFolderFace) getNode(id string) *nodeyMcNodeFace {
@@ -264,12 +269,52 @@ func (face *folderyMcFolderFace) setTombstone(
 	return nil
 }

+type countAndSize struct {
+	numFiles   int
+	totalBytes int64
+}
+
+// countLiveFilesAndSizes returns a count of the number of files in the tree
+// and the sum of all of their sizes. Only includes files that are not
+// children of tombstoned containers. If running an incremental backup, a
+// live file may be either a creation or an update.
+func (face *folderyMcFolderFace) countLiveFilesAndSizes() countAndSize {
+	return countFilesAndSizes(face.root)
+}
+
+func countFilesAndSizes(nodey *nodeyMcNodeFace) countAndSize {
+	if nodey == nil {
+		return countAndSize{}
+	}
+
+	var (
+		fileCount      int
+		sumContentSize int64
+	)
+
+	for _, child := range nodey.children {
+		countSize := countFilesAndSizes(child)
+		fileCount += countSize.numFiles
+		sumContentSize += countSize.totalBytes
+	}
+
+	for _, file := range nodey.files {
+		sumContentSize += file.contentSize
+	}
+
+	return countAndSize{
+		numFiles:   fileCount + len(nodey.files),
+		totalBytes: sumContentSize,
+	}
+}
+
 // addFile places the file in the correct parent node. If the
 // file was already added to the tree and is getting relocated,
 // this func will update and/or clean up all the old references.
 func (face *folderyMcFolderFace) addFile(
 	parentID, id string,
-	lastModifed time.Time,
+	lastModified time.Time,
+	contentSize int64,
 ) error {
 	if len(parentID) == 0 {
 		return clues.New("item added without parent folder ID")
@@ -298,7 +343,10 @@ func (face *folderyMcFolderFace) addFile(
 	}

 	face.fileIDToParentID[id] = parentID
-	parent.files[id] = lastModifed
+	parent.files[id] = fileyMcFileFace{
+		lastModified: lastModified,
+		contentSize:  contentSize,
+	}

 	delete(face.deletedFileIDs, id)
```
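Because `countLiveFilesAndSizes` recurses only from `face.root`, tombstoned subtrees (which are held in a separate map) never contribute to the totals. A self-contained toy of the same recursion, with the node and file types trimmed to the fields the walk actually touches; this is a sketch, not the corso types:

```go
package main

import "fmt"

// Minimal stand-ins for the tree node and file metadata in this diff.
type file struct{ contentSize int64 }

type node struct {
	children map[string]*node
	files    map[string]file
}

type countAndSize struct {
	numFiles   int
	totalBytes int64
}

// Same shape as the recursion added in the hunk above: sum the child
// subtrees first, then fold in this node's own files.
func countFilesAndSizes(n *node) countAndSize {
	if n == nil {
		return countAndSize{}
	}

	var out countAndSize

	for _, child := range n.children {
		cs := countFilesAndSizes(child)
		out.numFiles += cs.numFiles
		out.totalBytes += cs.totalBytes
	}

	for _, f := range n.files {
		out.totalBytes += f.contentSize
	}

	out.numFiles += len(n.files)

	return out
}

func main() {
	root := &node{
		children: map[string]*node{
			"folder": {files: map[string]file{"f2": {contentSize: 24}}},
		},
		files: map[string]file{"f1": {contentSize: 42}},
	}

	fmt.Printf("%+v\n", countFilesAndSizes(root)) // {numFiles:2 totalBytes:66}
}
```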
```diff
@@ -40,6 +40,7 @@ func treeWithFolders() *folderyMcFolderFace {

 	o := newNodeyMcNodeFace(tree.root, idx(folder, "parent"), namex(folder, "parent"), true)
 	tree.folderIDToNode[o.id] = o
+	tree.root.children[o.id] = o

 	f := newNodeyMcNodeFace(o, id(folder), name(folder), false)
 	tree.folderIDToNode[f.id] = f
@@ -49,16 +50,22 @@ func treeWithFolders() *folderyMcFolderFace {
 }

 func treeWithFileAtRoot() *folderyMcFolderFace {
-	tree := treeWithFolders()
-	tree.root.files[id(file)] = time.Now()
+	tree := treeWithRoot()
+	tree.root.files[id(file)] = fileyMcFileFace{
+		lastModified: time.Now(),
+		contentSize:  42,
+	}
 	tree.fileIDToParentID[id(file)] = rootID

 	return tree
 }

 func treeWithFileInFolder() *folderyMcFolderFace {
-	tree := treeWithFileAtRoot()
-	tree.folderIDToNode[id(folder)].files[id(file)] = time.Now()
+	tree := treeWithFolders()
+	tree.folderIDToNode[id(folder)].files[id(file)] = fileyMcFileFace{
+		lastModified: time.Now(),
+		contentSize:  42,
+	}
 	tree.fileIDToParentID[id(file)] = id(folder)

 	return tree
@@ -66,7 +73,10 @@ func treeWithFileInFolder() *folderyMcFolderFace {

 func treeWithFileInTombstone() *folderyMcFolderFace {
 	tree := treeWithTombstone()
-	tree.tombstones[id(folder)].files[id(file)] = time.Now()
+	tree.tombstones[id(folder)].files[id(file)] = fileyMcFileFace{
+		lastModified: time.Now(),
+		contentSize:  42,
+	}
 	tree.fileIDToParentID[id(file)] = id(folder)

 	return tree
@@ -689,6 +699,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 		tree        *folderyMcFolderFace
 		oldParentID string
 		parentID    string
+		contentSize int64
 		expectErr   assert.ErrorAssertionFunc
 		expectFiles map[string]string
 	}{
@@ -697,6 +708,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithRoot(),
 			oldParentID: "",
 			parentID:    rootID,
+			contentSize: 42,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -705,6 +717,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFolders(),
 			oldParentID: "",
 			parentID:    id(folder),
+			contentSize: 24,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): id(folder)},
 		},
@@ -713,6 +726,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFileAtRoot(),
 			oldParentID: rootID,
 			parentID:    rootID,
+			contentSize: 84,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -721,6 +735,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFileInFolder(),
 			oldParentID: id(folder),
 			parentID:    rootID,
+			contentSize: 48,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -729,6 +744,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFileInTombstone(),
 			oldParentID: id(folder),
 			parentID:    rootID,
+			contentSize: 2,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -737,6 +753,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithTombstone(),
 			oldParentID: "",
 			parentID:    id(folder),
+			contentSize: 4,
 			expectErr:   assert.Error,
 			expectFiles: map[string]string{},
 		},
@@ -745,6 +762,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithTombstone(),
 			oldParentID: "",
 			parentID:    idx(folder, 1),
+			contentSize: 8,
 			expectErr:   assert.Error,
 			expectFiles: map[string]string{},
 		},
@@ -753,6 +771,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithTombstone(),
 			oldParentID: "",
 			parentID:    "",
+			contentSize: 16,
 			expectErr:   assert.Error,
 			expectFiles: map[string]string{},
 		},
@@ -764,7 +783,8 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			err := test.tree.addFile(
 				test.parentID,
 				id(file),
-				time.Now())
+				time.Now(),
+				test.contentSize)
 			test.expectErr(t, err, clues.ToCore(err))
 			assert.Equal(t, test.expectFiles, test.tree.fileIDToParentID)

@@ -777,6 +797,10 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			require.NotNil(t, parent)
 			assert.Contains(t, parent.files, id(file))

+			countSize := test.tree.countLiveFilesAndSizes()
+			assert.Equal(t, 1, countSize.numFiles, "should have one file in the tree")
+			assert.Equal(t, test.contentSize, countSize.totalBytes, "tree should be sized to test file contents")
+
 			if len(test.oldParentID) > 0 && test.oldParentID != test.parentID {
 				old, ok := test.tree.folderIDToNode[test.oldParentID]
 				if !ok {
@@ -848,7 +872,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
 	assert.Len(t, tree.deletedFileIDs, 1)
 	assert.Contains(t, tree.deletedFileIDs, fID)

-	err := tree.addFile(rootID, fID, time.Now())
+	err := tree.addFile(rootID, fID, time.Now(), defaultItemSize)
 	require.NoError(t, err, clues.ToCore(err))

 	assert.Len(t, tree.fileIDToParentID, 1)
```
```diff
@@ -6,9 +6,6 @@ import (
 	"github.com/alcionai/corso/src/pkg/control"
 )

-// used to mark an unused variable while we transition handling.
-const ignoreMe = -1
-
 var errHitLimit = clues.New("hit limiter limits")

 type driveEnumerationStats struct {
@@ -62,10 +59,6 @@ func (l pagerLimiter) sizeLimit() int64 {
 	return l.limits.MaxBytes
 }

-func (l pagerLimiter) aboveSizeLimit(i int64) bool {
-	return l.limits.Enabled && (i >= l.limits.MaxBytes)
-}
-
 // atItemLimit returns true if the limiter is enabled and has reached the limit
 // for individual items added to collections for this backup.
 func (l pagerLimiter) atItemLimit(stats *driveEnumerationStats) bool {
@@ -81,7 +74,7 @@ func (l pagerLimiter) atContainerItemsLimit(numItems int) bool {
 	return l.enabled() && numItems >= l.limits.MaxItemsPerContainer
 }

-// atContainerPageLimit returns true if the limiter is enabled and the number of
+// atPageLimit returns true if the limiter is enabled and the number of
 // pages processed so far is beyond the limit for this backup.
 func (l pagerLimiter) atPageLimit(stats *driveEnumerationStats) bool {
 	return l.enabled() && stats.numPages >= l.limits.MaxPages
@@ -89,17 +82,38 @@ func (l pagerLimiter) atPageLimit(stats *driveEnumerationStats) bool {

 // atLimit returns true if the limiter is enabled and meets any of the
 // conditions for max items, containers, etc for this backup.
-func (l pagerLimiter) atLimit(
-	stats *driveEnumerationStats,
-	containerCount int,
-) bool {
-	nc := stats.numContainers
-	if nc == 0 && containerCount > 0 {
-		nc = containerCount
-	}
-
+func (l pagerLimiter) atLimit(stats *driveEnumerationStats) bool {
 	return l.enabled() &&
 		(l.atItemLimit(stats) ||
-			nc >= l.limits.MaxContainers ||
+			stats.numContainers >= l.limits.MaxContainers ||
 			stats.numPages >= l.limits.MaxPages)
 }

+// ---------------------------------------------------------------------------
+// Used by the tree version limit handling
+// ---------------------------------------------------------------------------
+
+// hitPageLimit returns true if the limiter is enabled and the number of
+// pages processed so far is beyond the limit for this backup.
+func (l pagerLimiter) hitPageLimit(pageCount int) bool {
+	return l.enabled() && pageCount >= l.limits.MaxPages
+}
+
+// hitContainerLimit returns true if the limiter is enabled and the number of
+// unique containers added so far is beyond the limit for this backup.
+func (l pagerLimiter) hitContainerLimit(containerCount int) bool {
+	return l.enabled() && containerCount >= l.limits.MaxContainers
+}
+
+// hitItemLimit returns true if the limiter is enabled and has reached the limit
+// for unique items added to collections for this backup.
+func (l pagerLimiter) hitItemLimit(itemCount int) bool {
+	return l.enabled() && itemCount >= l.limits.MaxItems
+}
+
+// hitTotalBytesLimit returns true if the limiter is enabled and has reached the limit
+// for the accumulated byte size of all items (the file contents, not the item metadata)
+// added to collections for this backup.
+func (l pagerLimiter) hitTotalBytesLimit(i int64) bool {
+	return l.enabled() && i >= l.limits.MaxBytes
+}
```
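The `hit*` helpers return a plain bool, but the tree walk converts a hit into the `errHitLimit` sentinel so the page loop can tell "backup is full" apart from recoverable per-item errors via `errors.Is`. A toy mirror of that flow; the helper names and page shape are illustrative, not corso's:

```go
package main

import (
	"errors"
	"fmt"
)

// Same sentinel pattern as the hunks above.
var errHitLimit = errors.New("hit limiter limits")

// addItem returns the sentinel once the item cap is reached.
func addItem(itemCount, maxItems int) error {
	if itemCount >= maxItems {
		return errHitLimit
	}

	return nil
}

// enumeratePage stops the whole enumeration on a limit hit, but would
// treat any other error as recoverable and keep going.
func enumeratePage(page []string, maxItems int) error {
	added := 0

	for range page {
		if err := addItem(added, maxItems); err != nil {
			if errors.Is(err, errHitLimit) {
				return err
			}

			continue
		}

		added++
	}

	return nil
}

func main() {
	err := enumeratePage([]string{"a", "b", "c"}, 2)
	fmt.Println(errors.Is(err, errHitLimit)) // true: the third item tripped the cap
}
```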
File diff suppressed because it is too large