add new funcs and tidy up drive limiter (#4734)
Adds new functions to the drive limiter that will be used specifically in the tree-based backup process, and splits the limiter tests into separate versions for the tree and non-tree variations. In this PR the tree variation is expected to fail; the next PR will focus on the backup-process corrections needed to make limit handling satisfy the existing tests.

---

#### Does this PR need a docs update or release note?

- [x] ⛔ No

#### Type of change

- [x] 🌻 Feature

#### Issue(s)

* #4689

#### Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
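At a glance, the change swaps the shared `driveEnumerationStats` plumbing for a family of `hit*` checks that take plain counts, so the tree-based walk can derive its counts directly from the tree. Below is a minimal standalone sketch of that shape; the `pagerLimiter` and `limits` types here are trimmed stand-ins for the real ones (whose values come from `control.Options`), kept only to illustrate the new API:

```go
package main

import "fmt"

// trimmed stand-in for the real limits struct; only the fields needed to
// illustrate the new count-based checks are included.
type limits struct {
	Enabled       bool
	MaxPages      int
	MaxContainers int
	MaxItems      int
	MaxBytes      int64
}

type pagerLimiter struct{ limits limits }

func (l pagerLimiter) enabled() bool { return l.limits.Enabled }

// Each check receives a plain count from the caller instead of reading a
// shared stats struct, mirroring the hit* functions added in this diff.
func (l pagerLimiter) hitPageLimit(pages int) bool        { return l.enabled() && pages >= l.limits.MaxPages }
func (l pagerLimiter) hitContainerLimit(folders int) bool { return l.enabled() && folders >= l.limits.MaxContainers }
func (l pagerLimiter) hitItemLimit(items int) bool        { return l.enabled() && items >= l.limits.MaxItems }
func (l pagerLimiter) hitTotalBytesLimit(b int64) bool    { return l.enabled() && b >= l.limits.MaxBytes }

func main() {
	l := pagerLimiter{limits: limits{Enabled: true, MaxBytes: 1 << 30}}

	// the byte check is fed the running total plus the candidate file size
	fmt.Println(l.hitTotalBytesLimit(int64(25) << 30)) // true
}
```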
parent b5c9199695
commit 047d46ea53
@@ -29,6 +29,8 @@ import (
 	"github.com/alcionai/corso/src/pkg/services/m365/api/pagers"
 )
 
+var errGetTreeNotImplemented = clues.New("forced error: cannot run tree-based backup: incomplete implementation")
+
 const (
 	restrictedDirectory = "Site Pages"
 
@@ -292,14 +294,14 @@ func (c *Collections) Get(
 	errs *fault.Bus,
 ) ([]data.BackupCollection, bool, error) {
 	if c.ctrl.ToggleFeatures.UseDeltaTree {
-		_, _, err := c.getTree(ctx, prevMetadata, ssmb, errs)
+		colls, canUsePrevBackup, err := c.getTree(ctx, prevMetadata, ssmb, errs)
 		if err != nil {
 			return nil, false, clues.Wrap(err, "processing backup using tree")
 		}
 
-		return nil,
-			false,
-			clues.New("forced error: cannot run tree-based backup: incomplete implementation")
+		return colls,
+			canUsePrevBackup,
+			errGetTreeNotImplemented
 	}
 
 	deltasByDriveID, prevPathsByDriveID, canUsePrevBackup, err := deserializeAndValidateMetadata(
@@ -856,7 +858,7 @@ func (c *Collections) PopulateDriveCollections(
 			// Don't check for containers we've already seen.
 			if _, ok := c.CollectionMap[driveID][id]; !ok {
 				if id != lastContainerID {
-					if limiter.atLimit(stats, ignoreMe) {
+					if limiter.atLimit(stats) {
 						break
 					}
 
File diff suppressed because it is too large
@@ -156,8 +156,6 @@ func (c *Collections) getTree(
 	return collections, canUsePrevBackup, nil
 }
 
-var errTreeNotImplemented = clues.New("backup tree not implemented")
-
 func (c *Collections) makeDriveCollections(
 	ctx context.Context,
 	drv models.Driveable,
@@ -172,10 +170,7 @@ func (c *Collections) makeDriveCollections(
 		return nil, nil, pagers.DeltaUpdate{}, clues.Wrap(err, "generating backup tree prefix")
 	}
 
-	var (
-		tree  = newFolderyMcFolderFace(ppfx)
-		stats = &driveEnumerationStats{}
-	)
+	tree := newFolderyMcFolderFace(ppfx)
 
 	counter.Add(count.PrevPaths, int64(len(prevPaths)))
 
@@ -184,10 +179,9 @@ func (c *Collections) makeDriveCollections(
 	du, err := c.populateTree(
 		ctx,
 		tree,
-		limiter,
-		stats,
 		drv,
 		prevDeltaLink,
+		limiter,
 		counter,
 		errs)
 	if err != nil {
@@ -260,7 +254,7 @@ func (c *Collections) makeDriveCollections(
 		return nil, nil, du, nil
 	}
 
-	return nil, nil, du, errTreeNotImplemented
+	return nil, nil, du, errGetTreeNotImplemented
 }
 
 // populateTree constructs a new tree and populates it with items
@@ -268,10 +262,9 @@ func (c *Collections) makeDriveCollections(
 func (c *Collections) populateTree(
 	ctx context.Context,
 	tree *folderyMcFolderFace,
-	limiter *pagerLimiter,
-	stats *driveEnumerationStats,
 	drv models.Driveable,
 	prevDeltaLink string,
+	limiter *pagerLimiter,
 	counter *count.Bus,
 	errs *fault.Bus,
 ) (pagers.DeltaUpdate, error) {
@@ -297,23 +290,18 @@ func (c *Collections) populateTree(
 			break
 		}
 
-		counter.Inc(count.PagesEnumerated)
-
 		if reset {
 			counter.Inc(count.PagerResets)
 			tree.reset()
 			c.resetStats()
-
-			*stats = driveEnumerationStats{}
 		}
 
 		err := c.enumeratePageOfItems(
 			ctx,
 			tree,
-			limiter,
-			stats,
 			drv,
 			page,
+			limiter,
 			counter,
 			errs)
 		if err != nil {
@@ -324,17 +312,12 @@ func (c *Collections) populateTree(
 			el.AddRecoverable(ctx, clues.Stack(err))
 		}
 
-		// Stop enumeration early if we've reached the item or page limit. Do this
-		// at the end of the loop so we don't request another page in the
-		// background.
-		//
-		// We don't want to break on just the container limit here because it's
-		// possible that there's more items in the current (final) container that
-		// we're processing. We need to see the next page to determine if we've
-		// reached the end of the container. Note that this doesn't take into
-		// account the number of items in the current container, so it's possible it
-		// will fetch more data when it doesn't really need to.
-		if limiter.atPageLimit(stats) || limiter.atItemLimit(stats) {
+		counter.Inc(count.PagesEnumerated)
+
+		// Stop enumeration early if we've reached the page limit. Keep this
+		// at the end of the loop so we don't request another page (pager.NextPage)
+		// before seeing we've passed the limit.
+		if limiter.hitPageLimit(int(counter.Get(count.PagesEnumerated))) {
 			break
 		}
 	}
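The page counter now increments at the bottom of the loop, so the limit check runs after a page has been fully processed but before the next `NextPage` call. A minimal runnable sketch of that ordering, with the delta pager replaced by a canned slice of pages (names here are stand-ins, not the real interfaces):

```go
package main

import "fmt"

type pagerLimiter struct{ maxPages int }

func (l pagerLimiter) hitPageLimit(pages int) bool { return pages >= l.maxPages }

func main() {
	var (
		limiter = pagerLimiter{maxPages: 2}
		pages   = 0
		// stand-in for the delta pager's stream of pages
		input = [][]string{{"a", "b"}, {"c"}, {"d", "e"}}
	)

	for _, page := range input {
		fmt.Println("processing", page)

		// count the page only after its items are handled, then check the
		// limit before the loop would request another page; the third page
		// is never fetched.
		pages++
		if limiter.hitPageLimit(pages) {
			break
		}
	}
}
```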
@@ -357,10 +340,9 @@ func (c *Collections) populateTree(
 func (c *Collections) enumeratePageOfItems(
 	ctx context.Context,
 	tree *folderyMcFolderFace,
-	limiter *pagerLimiter,
-	stats *driveEnumerationStats,
 	drv models.Driveable,
 	page []models.DriveItemable,
+	limiter *pagerLimiter,
 	counter *count.Bus,
 	errs *fault.Bus,
 ) error {
@@ -390,14 +372,9 @@ func (c *Collections) enumeratePageOfItems(
 
 		switch {
 		case isFolder:
-			// check limits before adding the next new folder
-			if !tree.containsFolder(itemID) && limiter.atLimit(stats, len(tree.folderIDToNode)) {
-				return errHitLimit
-			}
-
-			skipped, err = c.addFolderToTree(ictx, tree, drv, item, stats, counter)
+			skipped, err = c.addFolderToTree(ictx, tree, drv, item, limiter, counter)
 		case isFile:
-			skipped, err = c.addFileToTree(ictx, tree, drv, item, limiter, stats, counter)
+			skipped, err = c.addFileToTree(ictx, tree, drv, item, limiter, counter)
 		default:
 			err = clues.NewWC(ictx, "item is neither folder nor file").
 				Label(fault.LabelForceNoBackupCreation, count.UnknownItemType)
@@ -408,22 +385,14 @@ func (c *Collections) enumeratePageOfItems(
 		}
 
 		if err != nil {
-			el.AddRecoverable(ictx, clues.Wrap(err, "adding item"))
-		}
-
-		// Check if we reached the item or size limit while processing this page.
-		// The check after this loop will get us out of the pager.
-		// We don't want to check all limits because it's possible we've reached
-		// the container limit but haven't reached the item limit or really added
-		// items to the last container we found.
-		// FIXME(keepers): this isn't getting handled properly at the moment
-		if limiter.atItemLimit(stats) {
-			return errHitLimit
+			if errors.Is(err, errHitLimit) {
+				return err
+			}
+
+			el.AddRecoverable(ictx, clues.Wrap(err, "adding folder"))
 		}
 	}
 
-	stats.numPages++
-
 	return clues.Stack(el.Failure()).OrNil()
 }
 
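Limit checks now live inside `addFolderToTree`/`addFileToTree` and surface as the `errHitLimit` sentinel, which the page loop distinguishes from ordinary recoverable failures. A standalone sketch of that control flow under those assumptions; `addItem` and `enumerate` are hypothetical stand-ins, and `errors.Join` plays the role of the fault bus:

```go
package main

import (
	"errors"
	"fmt"
)

// sentinel mirroring errHitLimit: hitting a limit aborts enumeration,
// while ordinary item failures are recorded and the walk continues.
var errHitLimit = errors.New("hit limiter limits")

func addItem(id string) error {
	switch id {
	case "over-limit":
		return errHitLimit
	case "bad":
		return errors.New("malformed item")
	default:
		return nil
	}
}

func enumerate(ids []string) error {
	var recoverable []error

	for _, id := range ids {
		if err := addItem(id); err != nil {
			if errors.Is(err, errHitLimit) {
				// bubble the sentinel up so the caller stops paging
				return err
			}

			// stand-in for el.AddRecoverable
			recoverable = append(recoverable, err)
		}
	}

	return errors.Join(recoverable...)
}

func main() {
	err := enumerate([]string{"ok", "bad", "over-limit", "never-reached"})
	fmt.Println(errors.Is(err, errHitLimit)) // true
}
```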
@@ -432,7 +401,7 @@ func (c *Collections) addFolderToTree(
 	tree *folderyMcFolderFace,
 	drv models.Driveable,
 	folder models.DriveItemable,
-	stats *driveEnumerationStats,
+	limiter *pagerLimiter,
 	counter *count.Bus,
 ) (*fault.Skipped, error) {
 	var (
@@ -447,6 +416,11 @@ func (c *Collections) addFolderToTree(
 		notSelected bool
 	)
 
+	// check container limits before adding the next new folder
+	if !tree.containsFolder(folderID) && limiter.hitContainerLimit(tree.countLiveFolders()) {
+		return nil, errHitLimit
+	}
+
 	if parent != nil {
 		parentID = ptr.Val(parent.GetId())
 	}
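Note the ordering in that guard: a folder already present in the tree always passes, so renames and moves of known containers still process at the cap; only a folder that would grow the live-folder count is rejected. A small runnable stand-in for just that predicate (`live` and `hitContainerLimit` approximate `tree.containsFolder`/`countLiveFolders` and the limiter):

```go
package main

import "fmt"

func main() {
	// stand-ins for tree.containsFolder and tree.countLiveFolders
	live := map[string]bool{"root": true, "docs": true}
	maxContainers := 2

	hitContainerLimit := func(count int) bool { return count >= maxContainers }

	allow := func(folderID string) bool {
		return live[folderID] || !hitContainerLimit(len(live))
	}

	fmt.Println(allow("docs"))  // true: an update to a known folder passes
	fmt.Println(allow("music")) // false: a third live folder exceeds the cap
}
```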
@@ -541,18 +515,18 @@ func (c *Collections) addFileToTree(
 	drv models.Driveable,
 	file models.DriveItemable,
 	limiter *pagerLimiter,
-	stats *driveEnumerationStats,
 	counter *count.Bus,
 ) (*fault.Skipped, error) {
 	var (
 		driveID      = ptr.Val(drv.GetId())
 		fileID       = ptr.Val(file.GetId())
 		fileName     = ptr.Val(file.GetName())
 		fileSize     = ptr.Val(file.GetSize())
+		lastModified = ptr.Val(file.GetLastModifiedDateTime())
 		isDeleted    = file.GetDeleted() != nil
 		isMalware    = file.GetMalware() != nil
 		parent       = file.GetParentReference()
 		parentID     string
 	)
 
 	if parent != nil {
@@ -583,53 +557,37 @@ func (c *Collections) addFileToTree(
 		return skip, nil
 	}
 
-	_, alreadySeen := tree.fileIDToParentID[fileID]
-
 	if isDeleted {
 		tree.deleteFile(fileID)
+		return nil, nil
+	}
 
-		if alreadySeen {
-			stats.numAddedFiles--
-			// FIXME(keepers): this might be faulty,
-			// since deletes may not include the file size.
-			// it will likely need to be tracked in
-			// the tree alongside the file modtime.
-			stats.numBytes -= fileSize
-		} else {
-			c.NumItems++
-			c.NumFiles++
-		}
+	_, alreadySeen := tree.fileIDToParentID[fileID]
+	parentNode, parentNotNil := tree.folderIDToNode[parentID]
 
-		return nil, nil
-	}
+	if parentNotNil && !alreadySeen {
+		countSize := tree.countLiveFilesAndSizes()
 
-	parentNode, ok := tree.folderIDToNode[parentID]
+		// Don't add new items if the new collection has already reached it's limit.
+		// item moves and updates are generally allowed through.
+		if limiter.atContainerItemsLimit(len(parentNode.files)) || limiter.hitItemLimit(countSize.numFiles) {
+			return nil, errHitLimit
+		}
 
-	// Don't add new items if the new collection is already reached it's limit.
-	// item moves and updates are generally allowed through.
-	if ok && !alreadySeen && limiter.atContainerItemsLimit(len(parentNode.files)) {
-		return nil, nil
-	}
-
-	// Skip large files that don't fit within the size limit.
-	if limiter.aboveSizeLimit(fileSize + stats.numBytes) {
-		return nil, nil
-	}
-
-	err := tree.addFile(parentID, fileID, ptr.Val(file.GetLastModifiedDateTime()))
+		// Skip large files that don't fit within the size limit.
+		// unlike the other checks, which see if we're already at the limit, this check
+		// needs to be forward-facing to ensure we don't go far over the limit.
+		// Example case: a 1gb limit and a 25gb file.
+		if limiter.hitTotalBytesLimit(fileSize + countSize.totalBytes) {
+			return nil, errHitLimit
+		}
+	}
 
+	err := tree.addFile(parentID, fileID, lastModified, fileSize)
 	if err != nil {
 		return nil, clues.StackWC(ctx, err)
 	}
 
-	// Only increment counters for new files
-	if !alreadySeen {
-		// todo: remmove c.NumItems/Files in favor of counter and tree counting.
-		c.NumItems++
-		c.NumFiles++
-		stats.numAddedFiles++
-		stats.numBytes += fileSize
-	}
-
 	return nil, nil
 }
 
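The size check is deliberately forward-facing: it adds the candidate file's size to the tree's running total before comparing, so a single oversized file can't sail far past the cap. The 1gb/25gb case from the comment, worked as a runnable example (the helper duplicates the limiter's comparison; the sizes are illustrative):

```go
package main

import "fmt"

// same comparison as the limiter's hitTotalBytesLimit, with the max inlined
func hitTotalBytesLimit(maxBytes, totalBytes int64) bool {
	return totalBytes >= maxBytes
}

func main() {
	const gb = int64(1) << 30

	var (
		maxBytes  = 1 * gb  // backup byte cap
		inTree    = 0 * gb  // bytes already counted in the tree
		candidate = 25 * gb // size of the next file
	)

	// the candidate's size is included before the comparison, so the file
	// is skipped outright instead of being added and blowing past the cap.
	fmt.Println(hitTotalBytesLimit(maxBytes, inTree+candidate)) // true
}
```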
File diff suppressed because it is too large
@@ -80,8 +80,8 @@ type nodeyMcNodeFace struct {
 	prev path.Elements
 	// folderID -> node
 	children map[string]*nodeyMcNodeFace
-	// file item ID -> last modified time
-	files map[string]time.Time
+	// file item ID -> file metadata
+	files map[string]fileyMcFileFace
 	// for special handling protocols around packages
 	isPackage bool
 }
@@ -96,11 +96,16 @@ func newNodeyMcNodeFace(
 		id:        id,
 		name:      name,
 		children:  map[string]*nodeyMcNodeFace{},
-		files:     map[string]time.Time{},
+		files:     map[string]fileyMcFileFace{},
 		isPackage: isPackage,
 	}
 }
 
+type fileyMcFileFace struct {
+	lastModified time.Time
+	contentSize  int64
+}
+
 // ---------------------------------------------------------------------------
 // folder handling
 // ---------------------------------------------------------------------------
@@ -114,10 +119,10 @@ func (face *folderyMcFolderFace) containsFolder(id string) bool {
 	return stillKicking || alreadyBuried
 }
 
-// CountNodes returns a count that is the sum of live folders and
-// tombstones recorded in the tree.
-func (face *folderyMcFolderFace) countFolders() int {
-	return len(face.tombstones) + len(face.folderIDToNode)
+// countLiveFolders returns a count of the number of folders held in the tree.
+// Tombstones are not included in the count. Only live folders.
+func (face *folderyMcFolderFace) countLiveFolders() int {
+	return len(face.folderIDToNode)
 }
 
 func (face *folderyMcFolderFace) getNode(id string) *nodeyMcNodeFace {
@@ -264,12 +269,52 @@ func (face *folderyMcFolderFace) setTombstone(
 	return nil
 }
 
+type countAndSize struct {
+	numFiles   int
+	totalBytes int64
+}
+
+// countLiveFilesAndSizes returns a count of the number of files in the tree
+// and the sum of all of their sizes. Only includes files that are not
+// children of tombstoned containers. If running an incremental backup, a
+// live file may be either a creation or an update.
+func (face *folderyMcFolderFace) countLiveFilesAndSizes() countAndSize {
+	return countFilesAndSizes(face.root)
+}
+
+func countFilesAndSizes(nodey *nodeyMcNodeFace) countAndSize {
+	if nodey == nil {
+		return countAndSize{}
+	}
+
+	var (
+		fileCount      int
+		sumContentSize int64
+	)
+
+	for _, child := range nodey.children {
+		countSize := countFilesAndSizes(child)
+		fileCount += countSize.numFiles
+		sumContentSize += countSize.totalBytes
+	}
+
+	for _, file := range nodey.files {
+		sumContentSize += file.contentSize
+	}
+
+	return countAndSize{
+		numFiles:   fileCount + len(nodey.files),
+		totalBytes: sumContentSize,
+	}
+}
+
 // addFile places the file in the correct parent node. If the
 // file was already added to the tree and is getting relocated,
 // this func will update and/or clean up all the old references.
 func (face *folderyMcFolderFace) addFile(
 	parentID, id string,
-	lastModifed time.Time,
+	lastModified time.Time,
+	contentSize int64,
 ) error {
 	if len(parentID) == 0 {
 		return clues.New("item added without parent folder ID")
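`countFilesAndSizes` recurses through children first, then folds in the node's own files; tombstoned subtrees are never reached because they hang off the tombstones map rather than the root, which is what makes the count "live". A self-contained miniature with simplified node/file types (stand-ins for `nodeyMcNodeFace`/`fileyMcFileFace`):

```go
package main

import "fmt"

type file struct{ contentSize int64 }

type node struct {
	children map[string]*node
	files    map[string]file
}

type countAndSize struct {
	numFiles   int
	totalBytes int64
}

// same shape as the diff's countFilesAndSizes: recurse over children,
// then add this node's own file count and byte sizes.
func countFilesAndSizes(n *node) countAndSize {
	if n == nil {
		return countAndSize{}
	}

	var out countAndSize

	for _, child := range n.children {
		cs := countFilesAndSizes(child)
		out.numFiles += cs.numFiles
		out.totalBytes += cs.totalBytes
	}

	for _, f := range n.files {
		out.totalBytes += f.contentSize
	}

	out.numFiles += len(n.files)

	return out
}

func main() {
	root := &node{
		children: map[string]*node{
			"docs": {files: map[string]file{"a.txt": {contentSize: 10}}},
		},
		files: map[string]file{"b.txt": {contentSize: 32}},
	}

	fmt.Println(countFilesAndSizes(root)) // {2 42}
}
```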
@@ -298,7 +343,10 @@ func (face *folderyMcFolderFace) addFile(
 	}
 
 	face.fileIDToParentID[id] = parentID
-	parent.files[id] = lastModifed
+	parent.files[id] = fileyMcFileFace{
+		lastModified: lastModified,
+		contentSize:  contentSize,
+	}
 
 	delete(face.deletedFileIDs, id)
 
@@ -40,6 +40,7 @@ func treeWithFolders() *folderyMcFolderFace {
 
 	o := newNodeyMcNodeFace(tree.root, idx(folder, "parent"), namex(folder, "parent"), true)
 	tree.folderIDToNode[o.id] = o
+	tree.root.children[o.id] = o
 
 	f := newNodeyMcNodeFace(o, id(folder), name(folder), false)
 	tree.folderIDToNode[f.id] = f
@@ -49,16 +50,22 @@ func treeWithFolders() *folderyMcFolderFace {
 }
 
 func treeWithFileAtRoot() *folderyMcFolderFace {
-	tree := treeWithFolders()
-	tree.root.files[id(file)] = time.Now()
+	tree := treeWithRoot()
+	tree.root.files[id(file)] = fileyMcFileFace{
+		lastModified: time.Now(),
+		contentSize:  42,
+	}
 	tree.fileIDToParentID[id(file)] = rootID
 
 	return tree
 }
 
 func treeWithFileInFolder() *folderyMcFolderFace {
-	tree := treeWithFileAtRoot()
-	tree.folderIDToNode[id(folder)].files[id(file)] = time.Now()
+	tree := treeWithFolders()
+	tree.folderIDToNode[id(folder)].files[id(file)] = fileyMcFileFace{
+		lastModified: time.Now(),
+		contentSize:  42,
+	}
 	tree.fileIDToParentID[id(file)] = id(folder)
 
 	return tree
@@ -66,7 +73,10 @@ func treeWithFileInFolder() *folderyMcFolderFace {
 
 func treeWithFileInTombstone() *folderyMcFolderFace {
 	tree := treeWithTombstone()
-	tree.tombstones[id(folder)].files[id(file)] = time.Now()
+	tree.tombstones[id(folder)].files[id(file)] = fileyMcFileFace{
+		lastModified: time.Now(),
+		contentSize:  42,
+	}
 	tree.fileIDToParentID[id(file)] = id(folder)
 
 	return tree
@@ -689,6 +699,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 		tree        *folderyMcFolderFace
 		oldParentID string
 		parentID    string
+		contentSize int64
 		expectErr   assert.ErrorAssertionFunc
 		expectFiles map[string]string
 	}{
@@ -697,6 +708,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithRoot(),
 			oldParentID: "",
 			parentID:    rootID,
+			contentSize: 42,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -705,6 +717,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFolders(),
 			oldParentID: "",
 			parentID:    id(folder),
+			contentSize: 24,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): id(folder)},
 		},
@@ -713,6 +726,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFileAtRoot(),
 			oldParentID: rootID,
 			parentID:    rootID,
+			contentSize: 84,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -721,6 +735,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFileInFolder(),
 			oldParentID: id(folder),
 			parentID:    rootID,
+			contentSize: 48,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -729,6 +744,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithFileInTombstone(),
 			oldParentID: id(folder),
 			parentID:    rootID,
+			contentSize: 2,
 			expectErr:   assert.NoError,
 			expectFiles: map[string]string{id(file): rootID},
 		},
@@ -737,6 +753,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithTombstone(),
 			oldParentID: "",
 			parentID:    id(folder),
+			contentSize: 4,
 			expectErr:   assert.Error,
 			expectFiles: map[string]string{},
 		},
@@ -745,6 +762,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithTombstone(),
 			oldParentID: "",
 			parentID:    idx(folder, 1),
+			contentSize: 8,
 			expectErr:   assert.Error,
 			expectFiles: map[string]string{},
 		},
@@ -753,6 +771,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			tree:        treeWithTombstone(),
 			oldParentID: "",
 			parentID:    "",
+			contentSize: 16,
 			expectErr:   assert.Error,
 			expectFiles: map[string]string{},
 		},
@@ -764,7 +783,8 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			err := test.tree.addFile(
 				test.parentID,
 				id(file),
-				time.Now())
+				time.Now(),
+				test.contentSize)
 			test.expectErr(t, err, clues.ToCore(err))
 			assert.Equal(t, test.expectFiles, test.tree.fileIDToParentID)
 
@@ -777,6 +797,10 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 			require.NotNil(t, parent)
 			assert.Contains(t, parent.files, id(file))
 
+			countSize := test.tree.countLiveFilesAndSizes()
+			assert.Equal(t, 1, countSize.numFiles, "should have one file in the tree")
+			assert.Equal(t, test.contentSize, countSize.totalBytes, "tree should be sized to test file contents")
+
 			if len(test.oldParentID) > 0 && test.oldParentID != test.parentID {
 				old, ok := test.tree.folderIDToNode[test.oldParentID]
 				if !ok {
@@ -848,7 +872,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
 	assert.Len(t, tree.deletedFileIDs, 1)
 	assert.Contains(t, tree.deletedFileIDs, fID)
 
-	err := tree.addFile(rootID, fID, time.Now())
+	err := tree.addFile(rootID, fID, time.Now(), defaultItemSize)
 	require.NoError(t, err, clues.ToCore(err))
 
 	assert.Len(t, tree.fileIDToParentID, 1)
@@ -6,9 +6,6 @@ import (
 	"github.com/alcionai/corso/src/pkg/control"
 )
 
-// used to mark an unused variable while we transition handling.
-const ignoreMe = -1
-
 var errHitLimit = clues.New("hit limiter limits")
 
 type driveEnumerationStats struct {
@@ -62,10 +59,6 @@ func (l pagerLimiter) sizeLimit() int64 {
 	return l.limits.MaxBytes
 }
 
-func (l pagerLimiter) aboveSizeLimit(i int64) bool {
-	return l.limits.Enabled && (i >= l.limits.MaxBytes)
-}
-
 // atItemLimit returns true if the limiter is enabled and has reached the limit
 // for individual items added to collections for this backup.
 func (l pagerLimiter) atItemLimit(stats *driveEnumerationStats) bool {
@@ -81,7 +74,7 @@ func (l pagerLimiter) atContainerItemsLimit(numItems int) bool {
 	return l.enabled() && numItems >= l.limits.MaxItemsPerContainer
 }
 
-// atContainerPageLimit returns true if the limiter is enabled and the number of
+// atPageLimit returns true if the limiter is enabled and the number of
 // pages processed so far is beyond the limit for this backup.
 func (l pagerLimiter) atPageLimit(stats *driveEnumerationStats) bool {
 	return l.enabled() && stats.numPages >= l.limits.MaxPages
@@ -89,17 +82,38 @@ func (l pagerLimiter) atPageLimit(stats *driveEnumerationStats) bool {
 
 // atLimit returns true if the limiter is enabled and meets any of the
 // conditions for max items, containers, etc for this backup.
-func (l pagerLimiter) atLimit(
-	stats *driveEnumerationStats,
-	containerCount int,
-) bool {
-	nc := stats.numContainers
-	if nc == 0 && containerCount > 0 {
-		nc = containerCount
-	}
-
+func (l pagerLimiter) atLimit(stats *driveEnumerationStats) bool {
 	return l.enabled() &&
 		(l.atItemLimit(stats) ||
-			nc >= l.limits.MaxContainers ||
+			stats.numContainers >= l.limits.MaxContainers ||
 			stats.numPages >= l.limits.MaxPages)
 }
+
+// ---------------------------------------------------------------------------
+// Used by the tree version limit handling
+// ---------------------------------------------------------------------------
+
+// hitPageLimit returns true if the limiter is enabled and the number of
+// pages processed so far is beyond the limit for this backup.
+func (l pagerLimiter) hitPageLimit(pageCount int) bool {
+	return l.enabled() && pageCount >= l.limits.MaxPages
+}
+
+// hitContainerLimit returns true if the limiter is enabled and the number of
+// unique containers added so far is beyond the limit for this backup.
+func (l pagerLimiter) hitContainerLimit(containerCount int) bool {
+	return l.enabled() && containerCount >= l.limits.MaxContainers
+}
+
+// hitItemLimit returns true if the limiter is enabled and has reached the limit
+// for unique items added to collections for this backup.
+func (l pagerLimiter) hitItemLimit(itemCount int) bool {
+	return l.enabled() && itemCount >= l.limits.MaxItems
+}
+
+// hitTotalBytesLimit returns true if the limiter is enabled and has reached the limit
+// for the accumulated byte size of all items (the file contents, not the item metadata)
+// added to collections for this backup.
+func (l pagerLimiter) hitTotalBytesLimit(i int64) bool {
+	return l.enabled() && i >= l.limits.MaxBytes
+}
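Every new check is gated on `l.enabled()`, so when limits are disabled each predicate reports false and an unbounded backup skips the caps entirely; and since the comparisons use `>=`, reaching a cap exactly counts as a hit. A small runnable demonstration of both behaviors, again with a trimmed stand-in for `pagerLimiter`:

```go
package main

import "fmt"

type limits struct {
	Enabled  bool
	MaxItems int
}

type pagerLimiter struct{ limits limits }

func (l pagerLimiter) enabled() bool { return l.limits.Enabled }

// mirrors hitItemLimit: disabled limiters never report a hit,
// and >= makes the boundary value itself a hit.
func (l pagerLimiter) hitItemLimit(itemCount int) bool {
	return l.enabled() && itemCount >= l.limits.MaxItems
}

func main() {
	capped := pagerLimiter{limits: limits{Enabled: true, MaxItems: 3}}
	uncapped := pagerLimiter{limits: limits{}}

	fmt.Println(capped.hitItemLimit(3))    // true: exactly at the cap
	fmt.Println(capped.hitItemLimit(2))    // false: still under
	fmt.Println(uncapped.hitItemLimit(99)) // false: limiter disabled
}
```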
File diff suppressed because it is too large