add test.t and pfx to test tree generators (#4777)
this is needed to standardize the presence of a path prefix in all test-helper trees, so that we can use standard test factory helpers for producing complete post-process data.

---

#### Does this PR need a docs update or release note?

- [x] ⛔ No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Issue(s)

* #4689

#### Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
parent f62760f65a · commit b94e5a677d
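The change the title describes shows up in the test hunks below: every tree generator (`treeWithRoot`, `treeWithFolders`, `treeWithTombstone`, and friends) becomes a `func(t *testing.T) *folderyMcFolderFace` so it can assert on its own setup, and each tree is rooted under a standard path prefix instead of `nil`. A hedged sketch of the shape, reusing helper names visible in the diff (`pathWith`, `defaultLoc`, `newFolderyMcFolderFace`); the exact prefix construction is an assumption, not the repo's exact helper body:

```go
// newTree mirrors the deferred-generator pattern in the hunks below.
// Assumption: the prefix handed to newFolderyMcFolderFace is built with
// the same helpers the tests already use for previous paths.
func newTree(t *testing.T) *folderyMcFolderFace {
	// a non-nil prefix lets post-process factory helpers produce
	// complete collection paths without special-casing test trees.
	return newFolderyMcFolderFace(pathWith(defaultLoc()), rootID)
}
```

Deferring construction also means a generator that fails to build its prefix can fail the subtest through `t` instead of panicking while the table literal is being built.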
@@ -292,11 +292,11 @@ func DeserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) error
 func (c *Collections) Get(
 	ctx context.Context,
 	prevMetadata []data.RestoreCollection,
-	ssmb *prefixmatcher.StringSetMatchBuilder,
+	globalExcludeItemIDs *prefixmatcher.StringSetMatchBuilder,
 	errs *fault.Bus,
 ) ([]data.BackupCollection, bool, error) {
 	if c.ctrl.ToggleFeatures.UseDeltaTree {
-		colls, canUsePrevBackup, err := c.getTree(ctx, prevMetadata, ssmb, errs)
+		colls, canUsePrevBackup, err := c.getTree(ctx, prevMetadata, globalExcludeItemIDs, errs)
 		if err != nil && !errors.Is(err, errGetTreeNotImplemented) {
 			return nil, false, clues.Wrap(err, "processing backup using tree")
 		}
@@ -457,7 +457,7 @@ func (c *Collections) Get(
 			return nil, false, clues.WrapWC(ictx, err, "making exclude prefix")
 		}

-		ssmb.Add(p.String(), excludedItemIDs)
+		globalExcludeItemIDs.Add(p.String(), excludedItemIDs)

 		continue
 	}
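Despite the rename, the parameter's contract is unchanged: the builder maps a drive's canonical path prefix to the set of item IDs that must be excluded when merging against the previous backup. A hedged fragment of the call pattern, using only what these hunks show (the suffix scheme for the IDs appears in the delta_tree.go hunks further below):

```go
// p is the drive's canonical folder prefix; excludedItemIDs holds the
// "<itemID><suffix>" keys produced during delta processing.
globalExcludeItemIDs.Add(p.String(), excludedItemIDs)
```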
@@ -2,6 +2,7 @@ package drive

 import (
 	"context"
+	"fmt"

 	"github.com/alcionai/clues"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
@@ -33,7 +34,7 @@ import (
 func (c *Collections) getTree(
 	ctx context.Context,
 	prevMetadata []data.RestoreCollection,
-	ssmb *prefixmatcher.StringSetMatchBuilder,
+	globalExcludeItemIDsByDrivePrefix *prefixmatcher.StringSetMatchBuilder,
 	errs *fault.Bus,
 ) ([]data.BackupCollection, bool, error) {
 	ctx = clues.AddTraceName(ctx, "GetTree")
@@ -114,6 +115,7 @@ func (c *Collections) getTree(
 			prevPathsByDriveID[driveID],
 			deltasByDriveID[driveID],
 			limiter,
+			globalExcludeItemIDsByDrivePrefix,
 			cl,
 			el)
 		if err != nil {
@@ -168,15 +170,18 @@ func (c *Collections) makeDriveCollections(
 	prevPaths map[string]string,
 	prevDeltaLink string,
 	limiter *pagerLimiter,
+	globalExcludeItemIDsByDrivePrefix *prefixmatcher.StringSetMatchBuilder,
 	counter *count.Bus,
 	errs *fault.Bus,
 ) ([]data.BackupCollection, map[string]string, pagers.DeltaUpdate, error) {
-	ppfx, err := c.handler.PathPrefix(c.tenantID, ptr.Val(drv.GetId()))
+	driveID := ptr.Val(drv.GetId())
+
+	ppfx, err := c.handler.PathPrefix(c.tenantID, driveID)
 	if err != nil {
 		return nil, nil, pagers.DeltaUpdate{}, clues.Wrap(err, "generating backup tree prefix")
 	}

-	root, err := c.handler.GetRootFolder(ctx, ptr.Val(drv.GetId()))
+	root, err := c.handler.GetRootFolder(ctx, driveID)
 	if err != nil {
 		return nil, nil, pagers.DeltaUpdate{}, clues.Wrap(err, "getting root folder")
 	}
@@ -187,7 +192,7 @@ func (c *Collections) makeDriveCollections(

 	// --- delta item aggregation

-	du, err := c.populateTree(
+	du, countPagesInDelta, err := c.populateTree(
 		ctx,
 		tree,
 		drv,
@@ -199,74 +204,44 @@ func (c *Collections) makeDriveCollections(
 		return nil, nil, pagers.DeltaUpdate{}, clues.Stack(err)
 	}

-	// numDriveItems := c.NumItems - numPrevItems
-	// numPrevItems = c.NumItems
-
-	// cl.Add(count.NewPrevPaths, int64(len(newPrevPaths)))
-
 	// --- prev path incorporation

-	for folderID, p := range prevPaths {
-		// no check for errs.Failure here, despite the addRecoverable below.
-		// it's fine if we run through all of the collection generation even
-		// with failures present, and let the backup finish out.
-		prevPath, err := path.FromDataLayerPath(p, false)
-		if err != nil {
-			errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "invalid previous path").
-				With("folderID", folderID, "prev_path", p).
-				Label(fault.LabelForceNoBackupCreation))
-
-			continue
-		}
-
-		err = tree.setPreviousPath(folderID, prevPath)
-		if err != nil {
-			errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "setting previous path").
-				With("folderID", folderID, "prev_path", p).
-				Label(fault.LabelForceNoBackupCreation))
-
-			continue
-		}
+	err = addPrevPathsToTree(
+		ctx,
+		tree,
+		prevPaths,
+		errs)
+	if err != nil {
+		return nil, nil, pagers.DeltaUpdate{}, clues.Stack(err).Label(fault.LabelForceNoBackupCreation)
 	}

-	// TODO(keepers): leaving this code around for now as a guide
-	// while implementation progresses.
-
 	// --- post-processing

-	// Attach an url cache to the drive if the number of discovered items is
-	// below the threshold. Attaching cache to larger drives can cause
-	// performance issues since cache delta queries start taking up majority of
-	// the hour the refreshed URLs are valid for.
-
-	// if numDriveItems < urlCacheDriveItemThreshold {
-	// 	logger.Ctx(ictx).Infow(
-	// 		"adding url cache for drive",
-	// 		"num_drive_items", numDriveItems)
-
-	// 	uc, err := newURLCache(
-	// 		driveID,
-	// 		prevDeltaLink,
-	// 		urlCacheRefreshInterval,
-	// 		c.handler,
-	// 		cl,
-	// 		errs)
-	// 	if err != nil {
-	// 		return nil, false, clues.Stack(err)
-	// 	}
-
-	// 	// Set the URL cache instance for all collections in this drive.
-	// 	for id := range c.CollectionMap[driveID] {
-	// 		c.CollectionMap[driveID][id].urlCache = uc
-	// 	}
-	// }
-
-	// this is a dumb hack to satisfy the linter.
-	if ctx == nil {
-		return nil, nil, du, nil
+	collections, newPrevs, excludedItemIDs, err := c.turnTreeIntoCollections(
+		ctx,
+		tree,
+		driveID,
+		prevDeltaLink,
+		countPagesInDelta,
+		errs)
+	if err != nil {
+		return nil, nil, pagers.DeltaUpdate{}, clues.Stack(err).Label(fault.LabelForceNoBackupCreation)
 	}

-	return nil, nil, du, errGetTreeNotImplemented
+	// only populate the global excluded items if no delta reset occurred.
+	// if a reset did occur, the collections should already be marked as
+	// "do not merge", therefore everything will get processed as a new addition.
+	if !tree.hadReset {
+		p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID)
+		if err != nil {
+			err = clues.WrapWC(ctx, err, "making canonical path for item exclusions")
+			return nil, nil, pagers.DeltaUpdate{}, err
+		}
+
+		globalExcludeItemIDsByDrivePrefix.Add(p.String(), excludedItemIDs)
+	}
+
+	return collections, newPrevs, du, nil
 }

 // populateTree constructs a new tree and populates it with items
@@ -279,8 +254,8 @@ func (c *Collections) populateTree(
 	limiter *pagerLimiter,
 	counter *count.Bus,
 	errs *fault.Bus,
-) (pagers.DeltaUpdate, error) {
-	ctx = clues.Add(ctx, "invalid_prev_delta", len(prevDeltaLink) == 0)
+) (pagers.DeltaUpdate, int, error) {
+	ctx = clues.Add(ctx, "has_prev_delta", len(prevDeltaLink) > 0)

 	var (
 		currDeltaLink = prevDeltaLink
@@ -290,24 +265,49 @@ func (c *Collections) populateTree(
 		finished bool
 		hitLimit bool
 		// TODO: plug this into the limiter
-		maxDeltas   = 100
-		countDeltas = 0
+		maximumTotalDeltasAllowed int64 = 100
+		// pageCounter is intended as a separate local instance
+		// compared to the counter we use for other item tracking.
+		// IE: don't pass it around into other funcs.
+		//
+		// This allows us to reset pageCounter on a reset without
+		// cross-contaminating other counts.
+		//
+		// We use this to track three keys: 1. the total number of
+		// deltas enumerated (so that we don't hit an infinite
+		// loop); 2. the number of pages in each delta (for the
+		// limiter, but also for the URL cache so that it knows
+		// if we have too many pages for it to efficiently operate);
+		// and 3. the number of items in each delta (to know if we're
+		// done enumerating delta queries).
+		pageCounter = counter.Local()
+	)
+
+	const (
+		// track the exact number of pages across all deltas (correct across resets)
+		// so that the url cache knows if it can operate within performance bounds.
+		truePageCount count.Key = "pages-with-items-across-all-deltas"
 	)

 	// enumerate through multiple deltas until we either:
 	// 1. hit a consistent state (ie: no changes since last delta enum)
-	// 2. hit the limit
+	// 2. hit the limit based on the limiter
+	// 3. run 100 total delta enumerations without hitting 1. (no infinite loops)
 	for !hitLimit && !finished && el.Failure() == nil {
 		counter.Inc(count.TotalDeltasProcessed)

 		var (
-			pageCount     int
-			pageItemCount int
-			err           error
+			// this is used to track stats the total number of items
+			// processed in each delta. Since delta queries don't give
+			// us a plain flag for "no changes occurred", we check for
+			// 0 items in the delta as the "no changes occurred" state.
+			// The final page of any delta query may also return 0 items,
+			// so we need to combine both the item count and the deltaPageCount
+			// to get a correct flag.
+			iPageCounter = pageCounter.Local()
+			err          error
 		)

-		countDeltas++
-
 		pager := c.handler.EnumerateDriveItemsDelta(
 			ctx,
 			driveID,
@@ -318,19 +318,22 @@ func (c *Collections) populateTree(

 		for page, reset, done := pager.NextPage(); !done; page, reset, done = pager.NextPage() {
 			if el.Failure() != nil {
-				return du, el.Failure()
+				return du, 0, el.Failure()
 			}

+			// track the exact number of pages within a single delta (correct across resets)
+			// so that we can check for "no changes occurred" results.
+			// Note: don't inc `count.TotalPagesEnumerated` outside of this (ie, for the
+			// truePageCount), or else we'll double up on the inc.
+			iPageCounter.Inc(count.TotalPagesEnumerated)
+
 			if reset {
 				counter.Inc(count.PagerResets)
 				tree.reset()
 				c.resetStats()

-				pageCount = 0
-				pageItemCount = 0
-				countDeltas = 0
-			} else {
-				counter.Inc(count.TotalPagesEnumerated)
+				pageCounter = counter.Local()
+				iPageCounter = pageCounter.Local()
 			}

 			err = c.enumeratePageOfItems(
@@ -350,14 +353,17 @@ func (c *Collections) populateTree(
 				el.AddRecoverable(ctx, clues.Stack(err))
 			}

-			pageCount++
-			pageItemCount += len(page)
+			itemCount := int64(len(page))
+			iPageCounter.Add(count.TotalItemsProcessed, itemCount)
+
+			if itemCount > 0 {
+				pageCounter.Inc(truePageCount)
+			}

-			// Stop enumeration early if we've reached the page limit. Keep this
+			// Stop enumeration early if we've reached the total page limit. Keep this
 			// at the end of the loop so we don't request another page (pager.NextPage)
 			// before seeing we've passed the limit.
-			if limiter.hitPageLimit(pageCount) {
+			if limiter.hitPageLimit(int(pageCounter.Get(truePageCount))) {
 				hitLimit = true
 				break
 			}
@@ -370,23 +376,32 @@ func (c *Collections) populateTree(

 		du, err = pager.Results()
 		if err != nil {
-			return du, clues.Stack(err)
+			return du, 0, clues.Stack(err)
 		}

 		currDeltaLink = du.URL

 		// 0 pages is never expected. We should at least have one (empty) page to
 		// consume. But checking pageCount == 1 is brittle in a non-helpful way.
-		finished = pageCount < 2 && pageItemCount == 0
+		finished = iPageCounter.Get(count.TotalPagesEnumerated) < 2 &&
+			iPageCounter.Get(count.TotalItemsProcessed) == 0

-		if countDeltas >= maxDeltas {
-			return pagers.DeltaUpdate{}, clues.New("unable to produce consistent delta after 100 queries")
+		// ensure we don't enumerate more than the maximum allotted count of deltas.
+		if counter.Get(count.TotalDeltasProcessed) >= maximumTotalDeltasAllowed {
+			err := clues.NewWC(
+				ctx,
+				fmt.Sprintf("unable to produce consistent delta after %d queries", maximumTotalDeltasAllowed))
+
+			return pagers.DeltaUpdate{}, 0, err
 		}
 	}

-	logger.Ctx(ctx).Infow("enumerated collection delta", "stats", counter.Values())
+	logger.Ctx(ctx).Infow(
+		"enumerated collection delta",
+		"stats", counter.Values(),
+		"delta_stats", pageCounter.Values())

-	return du, el.Failure()
+	return du, int(pageCounter.Get(truePageCount)), el.Failure()
 }

 func (c *Collections) enumeratePageOfItems(
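As a reading aid for the hunk above, here is a minimal, self-contained stand-in for the nested-counter behavior the new code assumes: a `Local()` child whose increments also roll up to its ancestors. This is inferred from the calls visible in the diff (`Local`, `Inc`, `Add`, `Get`, `Values`) and from the double-counting warning in the comments; it is not the real `count.Bus`.

```go
package main

import "fmt"

// bus is an illustrative stand-in for count.Bus: a child created by
// Local() forwards every increment to its ancestors, which is why the
// diff warns that a second Inc(TotalPagesEnumerated) would double up.
type bus struct {
	parent *bus
	vals   map[string]int64
}

func newBus(parent *bus) *bus { return &bus{parent: parent, vals: map[string]int64{}} }

func (b *bus) Local() *bus { return newBus(b) }

func (b *bus) Add(key string, n int64) {
	for c := b; c != nil; c = c.parent {
		c.vals[key] += n
	}
}

func (b *bus) Inc(key string)       { b.Add(key, 1) }
func (b *bus) Get(key string) int64 { return b.vals[key] }

func main() {
	counter := newBus(nil)              // drive-wide stats
	pageCounter := counter.Local()      // per-enumeration; replaced on reset
	iPageCounter := pageCounter.Local() // per-delta; fresh for each delta

	// one delta: a page with 3 items, then a final empty page.
	for _, items := range []int64{3, 0} {
		iPageCounter.Inc("pages")
		iPageCounter.Add("items", items)

		if items > 0 {
			pageCounter.Inc("true-pages") // pages-with-items, reset-proof
		}
	}

	// the "no changes occurred" check from the diff: <2 pages, 0 items.
	finished := iPageCounter.Get("pages") < 2 && iPageCounter.Get("items") == 0

	fmt.Println(pageCounter.Get("true-pages"), finished) // 1 false

	// on a pager reset, the diff swaps in fresh locals so stale pages
	// don't contaminate the running totals:
	pageCounter = counter.Local()
	iPageCounter = pageCounter.Local()
	_ = iPageCounter
}
```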
@@ -401,12 +416,13 @@ func (c *Collections) enumeratePageOfItems(
 	ctx = clues.Add(ctx, "page_lenth", len(page))
 	el := errs.Local()

-	for i, item := range page {
+	for i, driveItem := range page {
 		if el.Failure() != nil {
 			break
 		}

 		var (
+			item     = custom.ToCustomDriveItem(driveItem)
 			isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
 			isFile   = item.GetFile() != nil
 			itemID   = ptr.Val(item.GetId())
@@ -452,7 +468,7 @@ func (c *Collections) addFolderToTree(
 	ctx context.Context,
 	tree *folderyMcFolderFace,
 	drv models.Driveable,
-	folder models.DriveItemable,
+	folder *custom.DriveItem,
 	limiter *pagerLimiter,
 	counter *count.Bus,
 ) (*fault.Skipped, error) {
@@ -501,7 +517,7 @@ func (c *Collections) addFolderToTree(
 			driveID,
 			folderID,
 			folderName,
-			graph.ItemInfo(custom.ToCustomDriveItem(folder)))
+			graph.ItemInfo(folder))

 		logger.Ctx(ctx).Infow("malware folder detected")

@@ -533,7 +549,7 @@ func (c *Collections) addFolderToTree(
 func (c *Collections) makeFolderCollectionPath(
 	ctx context.Context,
 	driveID string,
-	folder models.DriveItemable,
+	folder *custom.DriveItem,
 ) (path.Path, error) {
 	if folder.GetRoot() != nil {
 		pb := odConsts.DriveFolderPrefixBuilder(driveID)
@@ -565,20 +581,19 @@ func (c *Collections) addFileToTree(
 	ctx context.Context,
 	tree *folderyMcFolderFace,
 	drv models.Driveable,
-	file models.DriveItemable,
+	file *custom.DriveItem,
 	limiter *pagerLimiter,
 	counter *count.Bus,
 ) (*fault.Skipped, error) {
 	var (
 		driveID  = ptr.Val(drv.GetId())
 		fileID   = ptr.Val(file.GetId())
 		fileName = ptr.Val(file.GetName())
 		fileSize = ptr.Val(file.GetSize())
-		lastModified = ptr.Val(file.GetLastModifiedDateTime())
 		isDeleted = file.GetDeleted() != nil
 		isMalware = file.GetMalware() != nil
 		parent    = file.GetParentReference()
 		parentID  string
 	)

 	if parent != nil {
@@ -602,7 +617,7 @@ func (c *Collections) addFileToTree(
 			driveID,
 			fileID,
 			fileName,
-			graph.ItemInfo(custom.ToCustomDriveItem(file)))
+			graph.ItemInfo(file))

 		logger.Ctx(ctx).Infow("malware file detected")

@@ -635,7 +650,7 @@ func (c *Collections) addFileToTree(
 		}
 	}

-	err := tree.addFile(parentID, fileID, lastModified, fileSize)
+	err := tree.addFile(parentID, fileID, file)
 	if err != nil {
 		return nil, clues.StackWC(ctx, err)
 	}
@@ -737,3 +752,121 @@ func (c *Collections) makeMetadataCollections(

 	return append(colls, md)
 }
+
+func addPrevPathsToTree(
+	ctx context.Context,
+	tree *folderyMcFolderFace,
+	prevPaths map[string]string,
+	errs *fault.Bus,
+) error {
+	el := errs.Local()
+
+	for folderID, p := range prevPaths {
+		if el.Failure() != nil {
+			break
+		}
+
+		prevPath, err := path.FromDataLayerPath(p, false)
+		if err != nil {
+			el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "invalid previous path").
+				With("folderID", folderID, "prev_path", p).
+				Label(count.BadPrevPath))
+
+			continue
+		}
+
+		err = tree.setPreviousPath(folderID, prevPath)
+		if err != nil {
+			el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "setting previous path").
+				With("folderID", folderID, "prev_path", p))
+
+			continue
+		}
+	}
+
+	return el.Failure()
+}
+
+func (c *Collections) turnTreeIntoCollections(
+	ctx context.Context,
+	tree *folderyMcFolderFace,
+	driveID string,
+	prevDeltaLink string,
+	countPagesInDelta int,
+	errs *fault.Bus,
+) (
+	[]data.BackupCollection,
+	map[string]string,
+	map[string]struct{},
+	error,
+) {
+	collectables, err := tree.generateCollectables()
+	if err != nil {
+		err = clues.WrapWC(ctx, err, "generating backup collection data")
+		return nil, nil, nil, err
+	}
+
+	var (
+		collections  = []data.BackupCollection{}
+		newPrevPaths = map[string]string{}
+		uc           *urlCache
+		el           = errs.Local()
+	)
+
+	// Attach an url cache to the drive if the number of discovered items is
+	// below the threshold. Attaching cache to larger drives can cause
+	// performance issues since cache delta queries start taking up majority of
+	// the hour the refreshed URLs are valid for.
+	if countPagesInDelta < urlCacheDriveItemThreshold {
+		logger.Ctx(ctx).Info("adding url cache for drive collections")
+
+		uc, err = newURLCache(
+			driveID,
+			// we need the original prevDeltaLink here; a cache update will need
+			// to process all changes since the start of the backup. On the bright
+			// side, instead of running multiple delta enumerations, all changes
+			// in the backup should get compressed into the single delta query, which
+			// ensures the two states are sufficiently consistent with just the
+			// original delta token.
+			prevDeltaLink,
+			urlCacheRefreshInterval,
+			c.handler,
+			c.counter.Local(),
+			errs)
+		if err != nil {
+			return nil, nil, nil, clues.StackWC(ctx, err)
+		}
+	}
+
+	for id, cbl := range collectables {
+		if el.Failure() != nil {
+			break
+		}
+
+		if cbl.currPath != nil {
+			newPrevPaths[id] = cbl.currPath.String()
+		}
+
+		coll, err := NewCollection(
+			c.handler,
+			c.protectedResource,
+			cbl.currPath,
+			cbl.prevPath,
+			driveID,
+			c.statusUpdater,
+			c.ctrl,
+			cbl.isPackageOrChildOfPackage,
+			tree.hadReset,
+			uc,
+			c.counter.Local())
+		if err != nil {
+			return nil, nil, nil, clues.StackWC(ctx, err)
+		}
+
+		coll.driveItems = cbl.files
+
+		collections = append(collections, coll)
+	}
+
+	return collections, newPrevPaths, tree.generateExcludeItemIDs(), el.Failure()
+}
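Stepping back, the tree code path through makeDriveCollections now factors into three phases. The outline below is stitched together from the hunks above; it is a fragment for orientation, not compilable on its own, and the elided arguments are not shown in this diff:

```go
// 1. aggregate: enumerate delta pages into the in-memory folder tree,
//    reporting how many populated pages the enumeration produced.
du, countPagesInDelta, err := c.populateTree(ctx, tree, drv /* args elided in the diff */)

// 2. history: layer the previous backup's folder paths onto the tree.
err = addPrevPathsToTree(ctx, tree, prevPaths, errs)

// 3. post-process: walk the tree into collections, new prev-paths,
//    and the per-drive exclude set (skipped after a delta reset).
collections, newPrevs, excludedItemIDs, err := c.turnTreeIntoCollections(
	ctx, tree, driveID, prevDeltaLink, countPagesInDelta, errs)
```

The page count from phase 1 doubles as the gate for attaching a URL cache in phase 3: small enumerations get a cache keyed on the original `prevDeltaLink`, so a single refresh query covers every change since the backup began.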
[one file's diff suppressed because it is too large]
@@ -2,12 +2,14 @@ package drive

 import (
 	"context"
-	"time"

 	"github.com/alcionai/clues"

+	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
+	"github.com/alcionai/corso/src/pkg/services/m365/custom"
 )

 // folderyMcFolderFace owns our delta processing tree.
@@ -86,7 +88,7 @@ type nodeyMcNodeFace struct {
 	// folderID -> node
 	children map[string]*nodeyMcNodeFace
 	// file item ID -> file metadata
-	files map[string]fileyMcFileFace
+	files map[string]*custom.DriveItem
 	// for special handling protocols around packages
 	isPackage bool
 }
@@ -101,16 +103,11 @@ func newNodeyMcNodeFace(
 		id:       id,
 		name:     name,
 		children: map[string]*nodeyMcNodeFace{},
-		files:    map[string]fileyMcFileFace{},
+		files:    map[string]*custom.DriveItem{},
 		isPackage: isPackage,
 	}
 }

-type fileyMcFileFace struct {
-	lastModified time.Time
-	contentSize  int64
-}
-
 // ---------------------------------------------------------------------------
 // folder handling
 // ---------------------------------------------------------------------------
@@ -317,8 +314,7 @@ func (face *folderyMcFolderFace) setPreviousPath(
 // this func will update and/or clean up all the old references.
 func (face *folderyMcFolderFace) addFile(
 	parentID, id string,
-	lastModified time.Time,
-	contentSize int64,
+	file *custom.DriveItem,
 ) error {
 	if len(parentID) == 0 {
 		return clues.New("item added without parent folder ID")
@@ -347,10 +343,7 @@ func (face *folderyMcFolderFace) addFile(
 	}

 	face.fileIDToParentID[id] = parentID
-	parent.files[id] = fileyMcFileFace{
-		lastModified: lastModified,
-		contentSize:  contentSize,
-	}
+	parent.files[id] = file

 	delete(face.deletedFileIDs, id)

@@ -374,6 +367,114 @@ func (face *folderyMcFolderFace) deleteFile(id string) {
 	face.deletedFileIDs[id] = struct{}{}
 }

+// ---------------------------------------------------------------------------
+// post-processing
+// ---------------------------------------------------------------------------
+
+type collectable struct {
+	currPath                  path.Path
+	files                     map[string]*custom.DriveItem
+	folderID                  string
+	isPackageOrChildOfPackage bool
+	loc                       path.Elements
+	prevPath                  path.Path
+}
+
+// produces a map of folderID -> collectable
+func (face *folderyMcFolderFace) generateCollectables() (map[string]collectable, error) {
+	result := map[string]collectable{}
+	err := walkTreeAndBuildCollections(
+		face.root,
+		face.prefix,
+		&path.Builder{},
+		false,
+		result)
+
+	for id, tombstone := range face.tombstones {
+		// in case we got a folder deletion marker for a folder
+		// that has no previous path, drop the entry entirely.
+		// it doesn't exist in storage, so there's nothing to delete.
+		if tombstone.prev != nil {
+			result[id] = collectable{
+				folderID: id,
+				prevPath: tombstone.prev,
+			}
+		}
+	}
+
+	return result, clues.Stack(err).OrNil()
+}
+
+func walkTreeAndBuildCollections(
+	node *nodeyMcNodeFace,
+	pathPfx path.Path,
+	parentPath *path.Builder,
+	isChildOfPackage bool,
+	result map[string]collectable,
+) error {
+	if node == nil {
+		return nil
+	}
+
+	parentLocation := parentPath.Elements()
+	currentLocation := parentPath.Append(node.name)
+
+	for _, child := range node.children {
+		err := walkTreeAndBuildCollections(
+			child,
+			pathPfx,
+			currentLocation,
+			node.isPackage || isChildOfPackage,
+			result)
+		if err != nil {
+			return err
+		}
+	}
+
+	collectionPath, err := pathPfx.Append(false, currentLocation.Elements()...)
+	if err != nil {
+		return clues.Wrap(err, "building collection path").
+			With(
+				"path_prefix", pathPfx,
+				"path_suffix", currentLocation.Elements())
+	}
+
+	cbl := collectable{
+		currPath:                  collectionPath,
+		files:                     node.files,
+		folderID:                  node.id,
+		isPackageOrChildOfPackage: node.isPackage || isChildOfPackage,
+		loc:                       parentLocation,
+		prevPath:                  node.prev,
+	}
+
+	result[node.id] = cbl
+
+	return nil
+}
+
+func (face *folderyMcFolderFace) generateExcludeItemIDs() map[string]struct{} {
+	result := map[string]struct{}{}
+
+	for iID, pID := range face.fileIDToParentID {
+		if _, itsAlive := face.folderIDToNode[pID]; !itsAlive {
+			// don't worry about items whose parents are tombstoned.
+			// those will get handled in the delete cascade.
+			continue
+		}
+
+		result[iID+metadata.DataFileSuffix] = struct{}{}
+		result[iID+metadata.MetaFileSuffix] = struct{}{}
+	}
+
+	for iID := range face.deletedFileIDs {
+		result[iID+metadata.DataFileSuffix] = struct{}{}
+		result[iID+metadata.MetaFileSuffix] = struct{}{}
+	}
+
+	return result
+}
+
 // ---------------------------------------------------------------------------
 // quantification
 // ---------------------------------------------------------------------------
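The exclude map built by `generateExcludeItemIDs` keys on the item ID plus a data/meta suffix, one pair per live or deleted file. A minimal, runnable illustration of that shape; the suffix values here are stand-ins, since the real constants live in the drive `metadata` package:

```go
package main

import "fmt"

// stand-ins for metadata.DataFileSuffix and metadata.MetaFileSuffix.
const (
	dataFileSuffix = ".data"
	metaFileSuffix = ".meta"
)

func main() {
	// every file that survived the delta (or was deleted) contributes
	// both of its backing entries to the exclude set, so the merge with
	// the previous backup drops the stale .data and .meta items alike.
	fileIDs := []string{"item-1", "item-2"}
	excluded := map[string]struct{}{}

	for _, id := range fileIDs {
		excluded[id+dataFileSuffix] = struct{}{}
		excluded[id+metaFileSuffix] = struct{}{}
	}

	fmt.Println(len(excluded)) // 4
}
```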
@@ -414,7 +515,7 @@ func countFilesAndSizes(nodey *nodeyMcNodeFace) countAndSize {
 	}

 	for _, file := range nodey.files {
-		sumContentSize += file.contentSize
+		sumContentSize += ptr.Val(file.GetSize())
 	}

 	return countAndSize{
@ -2,15 +2,17 @@ package drive
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alcionai/clues"
|
"github.com/alcionai/clues"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
"github.com/alcionai/corso/src/pkg/path"
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
|
"github.com/alcionai/corso/src/pkg/services/m365/custom"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
@ -51,7 +53,7 @@ func (suite *DeltaTreeUnitSuite) TestNewNodeyMcNodeFace() {
|
|||||||
assert.Equal(t, parent, nodeFace.parent)
|
assert.Equal(t, parent, nodeFace.parent)
|
||||||
assert.Equal(t, "id", nodeFace.id)
|
assert.Equal(t, "id", nodeFace.id)
|
||||||
assert.Equal(t, "name", nodeFace.name)
|
assert.Equal(t, "name", nodeFace.name)
|
||||||
assert.NotEqual(t, loc, nodeFace.prev)
|
assert.Nil(t, nodeFace.prev)
|
||||||
assert.True(t, nodeFace.isPackage)
|
assert.True(t, nodeFace.isPackage)
|
||||||
assert.NotNil(t, nodeFace.children)
|
assert.NotNil(t, nodeFace.children)
|
||||||
assert.NotNil(t, nodeFace.files)
|
assert.NotNil(t, nodeFace.files)
|
||||||
@ -66,7 +68,7 @@ func (suite *DeltaTreeUnitSuite) TestNewNodeyMcNodeFace() {
|
|||||||
func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
||||||
table := []struct {
|
table := []struct {
|
||||||
tname string
|
tname string
|
||||||
tree *folderyMcFolderFace
|
tree func(t *testing.T) *folderyMcFolderFace
|
||||||
parentID string
|
parentID string
|
||||||
id string
|
id string
|
||||||
name string
|
name string
|
||||||
@ -75,7 +77,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
tname: "add root",
|
tname: "add root",
|
||||||
tree: newFolderyMcFolderFace(nil, rootID),
|
tree: newTree,
|
||||||
id: rootID,
|
id: rootID,
|
||||||
name: rootName,
|
name: rootName,
|
||||||
isPackage: true,
|
isPackage: true,
|
||||||
@ -83,14 +85,14 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "root already exists",
|
tname: "root already exists",
|
||||||
tree: treeWithRoot(),
|
tree: treeWithRoot,
|
||||||
id: rootID,
|
id: rootID,
|
||||||
name: rootName,
|
name: rootName,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "add folder",
|
tname: "add folder",
|
||||||
tree: treeWithRoot(),
|
tree: treeWithRoot,
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
name: name(folder),
|
name: name(folder),
|
||||||
@ -98,7 +100,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "add package",
|
tname: "add package",
|
||||||
tree: treeWithRoot(),
|
tree: treeWithRoot,
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
name: name(folder),
|
name: name(folder),
|
||||||
@ -107,7 +109,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "missing ID",
|
tname: "missing ID",
|
||||||
tree: treeWithRoot(),
|
tree: treeWithRoot,
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
name: name(folder),
|
name: name(folder),
|
||||||
isPackage: true,
|
isPackage: true,
|
||||||
@ -115,7 +117,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "missing name",
|
tname: "missing name",
|
||||||
tree: treeWithRoot(),
|
tree: treeWithRoot,
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
isPackage: true,
|
isPackage: true,
|
||||||
@ -123,7 +125,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "missing parentID",
|
tname: "missing parentID",
|
||||||
tree: treeWithRoot(),
|
tree: treeWithRoot,
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
name: name(folder),
|
name: name(folder),
|
||||||
isPackage: true,
|
isPackage: true,
|
||||||
@ -131,7 +133,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "already tombstoned",
|
tname: "already tombstoned",
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
name: name(folder),
|
name: name(folder),
|
||||||
@ -139,8 +141,10 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "add folder before parent",
|
tname: "add folder before parent",
|
||||||
tree: &folderyMcFolderFace{
|
tree: func(t *testing.T) *folderyMcFolderFace {
|
||||||
folderIDToNode: map[string]*nodeyMcNodeFace{},
|
return &folderyMcFolderFace{
|
||||||
|
folderIDToNode: map[string]*nodeyMcNodeFace{},
|
||||||
|
}
|
||||||
},
|
},
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
@ -150,7 +154,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "folder already exists",
|
tname: "folder already exists",
|
||||||
tree: treeWithFolders(),
|
tree: treeWithFolders,
|
||||||
parentID: idx(folder, "parent"),
|
parentID: idx(folder, "parent"),
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
name: name(folder),
|
name: name(folder),
|
||||||
@ -164,7 +168,9 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
err := test.tree.setFolder(
|
tree := test.tree(t)
|
||||||
|
|
||||||
|
err := tree.setFolder(
|
||||||
ctx,
|
ctx,
|
||||||
test.parentID,
|
test.parentID,
|
||||||
test.id,
|
test.id,
|
||||||
@ -176,17 +182,17 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result := test.tree.folderIDToNode[test.id]
|
result := tree.folderIDToNode[test.id]
|
||||||
require.NotNil(t, result)
|
require.NotNil(t, result)
|
||||||
assert.Equal(t, test.id, result.id)
|
assert.Equal(t, test.id, result.id)
|
||||||
assert.Equal(t, test.name, result.name)
|
assert.Equal(t, test.name, result.name)
|
||||||
assert.Equal(t, test.isPackage, result.isPackage)
|
assert.Equal(t, test.isPackage, result.isPackage)
|
||||||
|
|
||||||
_, ded := test.tree.tombstones[test.id]
|
_, ded := tree.tombstones[test.id]
|
||||||
assert.False(t, ded)
|
assert.False(t, ded)
|
||||||
|
|
||||||
if len(test.parentID) > 0 {
|
if len(test.parentID) > 0 {
|
||||||
parent := test.tree.folderIDToNode[test.parentID]
|
parent := tree.folderIDToNode[test.parentID]
|
||||||
assert.Equal(t, parent, result.parent)
|
assert.Equal(t, parent, result.parent)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -197,36 +203,36 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddTombstone() {
|
|||||||
table := []struct {
|
table := []struct {
|
||||||
name string
|
name string
|
||||||
id string
|
id string
|
||||||
tree *folderyMcFolderFace
|
tree func(t *testing.T) *folderyMcFolderFace
|
||||||
expectErr assert.ErrorAssertionFunc
|
expectErr assert.ErrorAssertionFunc
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "add tombstone",
|
name: "add tombstone",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
tree: newFolderyMcFolderFace(nil, rootID),
|
tree: newTree,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "duplicate tombstone",
|
name: "duplicate tombstone",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "missing ID",
|
name: "missing ID",
|
||||||
tree: newFolderyMcFolderFace(nil, rootID),
|
tree: newTree,
|
||||||
expectErr: assert.Error,
|
expectErr: assert.Error,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "conflict: folder alive",
|
name: "conflict: folder alive",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "already tombstoned",
|
name: "already tombstoned",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -237,14 +243,16 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddTombstone() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
err := test.tree.setTombstone(ctx, test.id)
|
tree := test.tree(t)
|
||||||
|
|
||||||
|
err := tree.setTombstone(ctx, test.id)
|
||||||
test.expectErr(t, err, clues.ToCore(err))
|
test.expectErr(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result := test.tree.tombstones[test.id]
|
result := tree.tombstones[test.id]
|
||||||
require.NotNil(t, result)
|
require.NotNil(t, result)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -262,7 +270,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
name string
|
name string
|
||||||
id string
|
id string
|
||||||
prev path.Path
|
prev path.Path
|
||||||
tree *folderyMcFolderFace
|
tree func(t *testing.T) *folderyMcFolderFace
|
||||||
expectErr assert.ErrorAssertionFunc
|
expectErr assert.ErrorAssertionFunc
|
||||||
expectLive bool
|
expectLive bool
|
||||||
expectTombstone bool
|
expectTombstone bool
|
||||||
@ -270,8 +278,8 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
{
|
{
|
||||||
name: "no changes become a no-op",
|
name: "no changes become a no-op",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
prev: pathWith(loc),
|
prev: pathWith(defaultLoc()),
|
||||||
tree: newFolderyMcFolderFace(nil, rootID),
|
tree: newTree,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
expectLive: false,
|
expectLive: false,
|
||||||
expectTombstone: false,
|
expectTombstone: false,
|
||||||
@ -279,8 +287,8 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
{
|
{
|
||||||
name: "added folders after reset",
|
name: "added folders after reset",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
prev: pathWith(loc),
|
prev: pathWith(defaultLoc()),
|
||||||
tree: treeWithFoldersAfterReset(),
|
tree: treeWithFoldersAfterReset,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
expectLive: true,
|
expectLive: true,
|
||||||
expectTombstone: false,
|
expectTombstone: false,
|
||||||
@ -288,16 +296,16 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
{
|
{
|
||||||
name: "create tombstone after reset",
|
name: "create tombstone after reset",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
prev: pathWith(loc),
|
prev: pathWith(defaultLoc()),
|
||||||
tree: treeAfterReset(),
|
tree: treeAfterReset,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
expectLive: false,
|
expectLive: false,
|
||||||
expectTombstone: true,
|
expectTombstone: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "missing ID",
|
name: "missing ID",
|
||||||
prev: pathWith(loc),
|
prev: pathWith(defaultLoc()),
|
||||||
tree: newFolderyMcFolderFace(nil, rootID),
|
tree: newTree,
|
||||||
expectErr: assert.Error,
|
expectErr: assert.Error,
|
||||||
expectLive: false,
|
expectLive: false,
|
||||||
expectTombstone: false,
|
expectTombstone: false,
|
||||||
@ -305,7 +313,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
{
|
{
|
||||||
name: "missing prev",
|
name: "missing prev",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
tree: newFolderyMcFolderFace(nil, rootID),
|
tree: newTree,
|
||||||
expectErr: assert.Error,
|
expectErr: assert.Error,
|
||||||
expectLive: false,
|
expectLive: false,
|
||||||
expectTombstone: false,
|
expectTombstone: false,
|
||||||
@ -313,8 +321,8 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
{
|
{
|
||||||
name: "update live folder",
|
name: "update live folder",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
prev: pathWith(loc),
|
prev: pathWith(defaultLoc()),
|
||||||
tree: treeWithFolders(),
|
tree: treeWithFolders,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
expectLive: true,
|
expectLive: true,
|
||||||
expectTombstone: false,
|
expectTombstone: false,
|
||||||
@ -322,8 +330,8 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
{
|
{
|
||||||
name: "update tombstone",
|
name: "update tombstone",
|
||||||
id: id(folder),
|
id: id(folder),
|
||||||
prev: pathWith(loc),
|
prev: pathWith(defaultLoc()),
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
expectErr: assert.NoError,
|
expectErr: assert.NoError,
|
||||||
expectLive: false,
|
expectLive: false,
|
||||||
expectTombstone: true,
|
expectTombstone: true,
|
||||||
@ -332,22 +340,23 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
|
|||||||
for _, test := range table {
|
for _, test := range table {
|
||||||
suite.Run(test.name, func() {
|
suite.Run(test.name, func() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
|
tree := test.tree(t)
|
||||||
|
|
||||||
err := test.tree.setPreviousPath(test.id, test.prev)
|
err := tree.setPreviousPath(test.id, test.prev)
|
||||||
test.expectErr(t, err, clues.ToCore(err))
|
test.expectErr(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
if test.expectLive {
|
if test.expectLive {
|
||||||
require.Contains(t, test.tree.folderIDToNode, test.id)
|
require.Contains(t, tree.folderIDToNode, test.id)
|
||||||
assert.Equal(t, test.prev, test.tree.folderIDToNode[test.id].prev)
|
assert.Equal(t, test.prev.String(), tree.folderIDToNode[test.id].prev.String())
|
||||||
} else {
|
} else {
|
||||||
require.NotContains(t, test.tree.folderIDToNode, test.id)
|
require.NotContains(t, tree.folderIDToNode, test.id)
|
||||||
}
|
}
|
||||||
|
|
||||||
if test.expectTombstone {
|
if test.expectTombstone {
|
||||||
require.Contains(t, test.tree.tombstones, test.id)
|
require.Contains(t, tree.tombstones, test.id)
|
||||||
assert.Equal(t, test.prev, test.tree.tombstones[test.id].prev)
|
assert.Equal(t, test.prev, tree.tombstones[test.id].prev)
|
||||||
} else {
|
} else {
|
||||||
require.NotContains(t, test.tree.tombstones, test.id)
|
require.NotContains(t, tree.tombstones, test.id)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -469,7 +478,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder_correctTree()
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
tree := treeWithRoot()
|
tree := treeWithRoot(t)
|
||||||
|
|
||||||
set := func(
|
set := func(
|
||||||
parentID, fid, fname string,
|
parentID, fid, fname string,
|
||||||
@ -555,7 +564,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder_correctTombst
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
tree := treeWithRoot()
|
tree := treeWithRoot(t)
|
||||||
|
|
||||||
set := func(
|
set := func(
|
||||||
parentID, fid, fname string,
|
parentID, fid, fname string,
|
||||||
@ -730,7 +739,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder_correctTombst
|
|||||||
func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
||||||
table := []struct {
|
table := []struct {
|
||||||
tname string
|
tname string
|
||||||
tree *folderyMcFolderFace
|
tree func(t *testing.T) *folderyMcFolderFace
|
||||||
oldParentID string
|
oldParentID string
|
||||||
parentID string
|
parentID string
|
||||||
contentSize int64
|
contentSize int64
|
||||||
@ -739,7 +748,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
tname: "add file to root",
|
tname: "add file to root",
|
||||||
tree: treeWithRoot(),
|
tree: treeWithRoot,
|
||||||
oldParentID: "",
|
oldParentID: "",
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
contentSize: 42,
|
contentSize: 42,
|
||||||
@ -748,7 +757,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "add file to folder",
|
tname: "add file to folder",
|
||||||
tree: treeWithFolders(),
|
tree: treeWithFolders,
|
||||||
oldParentID: "",
|
oldParentID: "",
|
||||||
parentID: id(folder),
|
parentID: id(folder),
|
||||||
contentSize: 24,
|
contentSize: 24,
|
||||||
@ -757,7 +766,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "re-add file at the same location",
|
tname: "re-add file at the same location",
|
||||||
tree: treeWithFileAtRoot(),
|
tree: treeWithFileAtRoot,
|
||||||
oldParentID: rootID,
|
oldParentID: rootID,
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
contentSize: 84,
|
contentSize: 84,
|
||||||
@ -766,7 +775,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "move file from folder to root",
|
tname: "move file from folder to root",
|
||||||
tree: treeWithFileInFolder(),
|
tree: treeWithFileInFolder,
|
||||||
oldParentID: id(folder),
|
oldParentID: id(folder),
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
contentSize: 48,
|
contentSize: 48,
|
||||||
@ -775,7 +784,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "move file from tombstone to root",
|
tname: "move file from tombstone to root",
|
||||||
tree: treeWithFileInTombstone(),
|
tree: treeWithFileInTombstone,
|
||||||
oldParentID: id(folder),
|
oldParentID: id(folder),
|
||||||
parentID: rootID,
|
parentID: rootID,
|
||||||
contentSize: 2,
|
contentSize: 2,
|
||||||
@ -784,7 +793,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "error adding file to tombstone",
|
tname: "error adding file to tombstone",
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
oldParentID: "",
|
oldParentID: "",
|
||||||
parentID: id(folder),
|
parentID: id(folder),
|
||||||
contentSize: 4,
|
contentSize: 4,
|
||||||
@ -793,7 +802,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "error adding file before parent",
|
tname: "error adding file before parent",
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
oldParentID: "",
|
oldParentID: "",
|
||||||
parentID: idx(folder, 1),
|
parentID: idx(folder, 1),
|
||||||
contentSize: 8,
|
contentSize: 8,
|
||||||
@ -802,7 +811,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
tname: "error adding file without parent id",
|
tname: "error adding file without parent id",
|
||||||
tree: treeWithTombstone(),
|
tree: treeWithTombstone,
|
||||||
oldParentID: "",
|
oldParentID: "",
|
||||||
parentID: "",
|
parentID: "",
|
||||||
contentSize: 16,
|
contentSize: 16,
|
||||||
@ -813,33 +822,33 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
|
|||||||
for _, test := range table {
|
for _, test := range table {
|
||||||
suite.Run(test.tname, func() {
|
suite.Run(test.tname, func() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
|
tree := test.tree(t)
|
||||||
|
|
||||||
err := test.tree.addFile(
|
df := driveFile(file, parentDir(), test.parentID)
|
||||||
|
df.SetSize(ptr.To(test.contentSize))
|
||||||
|
|
||||||
|
err := tree.addFile(
|
||||||
test.parentID,
|
test.parentID,
|
||||||
id(file),
|
id(file),
|
||||||
time.Now(),
|
custom.ToCustomDriveItem(df))
|
||||||
test.contentSize)
|
|
||||||
test.expectErr(t, err, clues.ToCore(err))
|
test.expectErr(t, err, clues.ToCore(err))
|
||||||
assert.Equal(t, test.expectFiles, test.tree.fileIDToParentID)
|
assert.Equal(t, test.expectFiles, tree.fileIDToParentID)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
parent := test.tree.getNode(test.parentID)
|
parent := tree.getNode(test.parentID)
|
||||||
|
|
||||||
require.NotNil(t, parent)
|
require.NotNil(t, parent)
|
||||||
assert.Contains(t, parent.files, id(file))
|
assert.Contains(t, parent.files, id(file))
|
||||||
|
|
||||||
countSize := test.tree.countLiveFilesAndSizes()
|
countSize := tree.countLiveFilesAndSizes()
|
||||||
assert.Equal(t, 1, countSize.numFiles, "should have one file in the tree")
|
assert.Equal(t, 1, countSize.numFiles, "should have one file in the tree")
|
||||||
assert.Equal(t, test.contentSize, countSize.totalBytes, "tree should be sized to test file contents")
|
assert.Equal(t, test.contentSize, countSize.totalBytes, "tree should be sized to test file contents")
|
||||||
|
|
||||||
if len(test.oldParentID) > 0 && test.oldParentID != test.parentID {
|
if len(test.oldParentID) > 0 && test.oldParentID != test.parentID {
|
||||||
old, ok := test.tree.folderIDToNode[test.oldParentID]
|
old := tree.getNode(test.oldParentID)
|
||||||
if !ok {
|
|
||||||
old = test.tree.tombstones[test.oldParentID]
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NotNil(t, old)
|
require.NotNil(t, old)
|
||||||
assert.NotContains(t, old.files, id(file))
|
assert.NotContains(t, old.files, id(file))
|
||||||
@@ -851,49 +860,50 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
 func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_DeleteFile() {
 	table := []struct {
 		tname    string
-		tree     *folderyMcFolderFace
+		tree     func(t *testing.T) *folderyMcFolderFace
 		parentID string
 	}{
 		{
 			tname:    "delete unseen file",
-			tree:     treeWithRoot(),
+			tree:     treeWithRoot,
 			parentID: rootID,
 		},
 		{
 			tname:    "delete file from root",
-			tree:     treeWithFolders(),
+			tree:     treeWithFolders,
 			parentID: rootID,
 		},
 		{
 			tname:    "delete file from folder",
-			tree:     treeWithFileInFolder(),
+			tree:     treeWithFileInFolder,
 			parentID: id(folder),
 		},
 		{
 			tname:    "delete file from tombstone",
-			tree:     treeWithFileInTombstone(),
+			tree:     treeWithFileInTombstone,
 			parentID: id(folder),
 		},
 	}
 	for _, test := range table {
 		suite.Run(test.tname, func() {
 			t := suite.T()
+			tree := test.tree(t)
 
-			test.tree.deleteFile(id(file))
+			tree.deleteFile(id(file))
 
-			parent := test.tree.getNode(test.parentID)
+			parent := tree.getNode(test.parentID)
 
 			require.NotNil(t, parent)
 			assert.NotContains(t, parent.files, id(file))
-			assert.NotContains(t, test.tree.fileIDToParentID, id(file))
-			assert.Contains(t, test.tree.deletedFileIDs, id(file))
+			assert.NotContains(t, tree.fileIDToParentID, id(file))
+			assert.Contains(t, tree.deletedFileIDs, id(file))
 		})
 	}
 }
 
 func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
 	t := suite.T()
-	tree := treeWithRoot()
+	tree := treeWithRoot(t)
 	fID := id(file)
 
 	require.Len(t, tree.fileIDToParentID, 0)
@@ -906,7 +916,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
 	assert.Len(t, tree.deletedFileIDs, 1)
 	assert.Contains(t, tree.deletedFileIDs, fID)
 
-	err := tree.addFile(rootID, fID, time.Now(), defaultItemSize)
+	err := tree.addFile(rootID, fID, custom.ToCustomDriveItem(fileAtRoot()))
 	require.NoError(t, err, clues.ToCore(err))
 
 	assert.Len(t, tree.fileIDToParentID, 1)
@@ -921,3 +931,283 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
 	assert.Len(t, tree.deletedFileIDs, 1)
 	assert.Contains(t, tree.deletedFileIDs, fID)
 }
+
+func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_GenerateExcludeItemIDs() {
+	table := []struct {
+		name   string
+		tree   func(t *testing.T) *folderyMcFolderFace
+		expect map[string]struct{}
+	}{
+		{
+			name:   "no files",
+			tree:   treeWithRoot,
+			expect: map[string]struct{}{},
+		},
+		{
+			name:   "one file in a folder",
+			tree:   treeWithFileInFolder,
+			expect: makeExcludeMap(id(file)),
+		},
+		{
+			name:   "one file in a tombstone",
+			tree:   treeWithFileInTombstone,
+			expect: map[string]struct{}{},
+		},
+		{
+			name:   "one deleted file",
+			tree:   treeWithDeletedFile,
+			expect: makeExcludeMap(idx(file, "d")),
+		},
+		{
+			name: "files in folders and tombstones",
+			tree: fullTree,
+			expect: makeExcludeMap(
+				id(file),
+				idx(file, "r"),
+				idx(file, "p"),
+				idx(file, "d")),
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+			tree := test.tree(t)
+
+			result := tree.generateExcludeItemIDs()
+			assert.Equal(t, test.expect, result)
+		})
+	}
+}
+
+// ---------------------------------------------------------------------------
+// post-processing tests
+// ---------------------------------------------------------------------------
+
+func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_GenerateCollectables() {
+	t := suite.T()
+
+	table := []struct {
+		name      string
+		tree      func(t *testing.T) *folderyMcFolderFace
+		prevPaths map[string]string
+		expectErr require.ErrorAssertionFunc
+		expect    map[string]collectable
+	}{
+		{
+			name:      "empty tree",
+			tree:      newTree,
+			expectErr: require.NoError,
+			expect:    map[string]collectable{},
+		},
+		{
+			name:      "root only",
+			tree:      treeWithRoot,
+			expectErr: require.NoError,
+			expect: map[string]collectable{
+				rootID: {
+					currPath:                  fullPathPath(t),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  rootID,
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{},
+				},
+			},
+		},
+		{
+			name:      "root with files",
+			tree:      treeWithFileAtRoot,
+			expectErr: require.NoError,
+			expect: map[string]collectable{
+				rootID: {
+					currPath: fullPathPath(t),
+					files: map[string]*custom.DriveItem{
+						id(file): custom.ToCustomDriveItem(fileAtRoot()),
+					},
+					folderID:                  rootID,
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{},
+				},
+			},
+		},
+		{
+			name:      "folder hierarchy, no previous",
+			tree:      treeWithFileInFolder,
+			expectErr: require.NoError,
+			expect: map[string]collectable{
+				rootID: {
+					currPath:                  fullPathPath(t),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  rootID,
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{},
+				},
+				idx(folder, "parent"): {
+					currPath:                  fullPathPath(t, namex(folder, "parent")),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  idx(folder, "parent"),
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{rootName},
+				},
+				id(folder): {
+					currPath: fullPathPath(t, namex(folder, "parent"), name(folder)),
+					files: map[string]*custom.DriveItem{
+						id(file): custom.ToCustomDriveItem(fileAt("parent")),
+					},
+					folderID:                  id(folder),
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{rootName, namex(folder, "parent")},
+				},
+			},
+		},
+		{
+			name: "package in hierarchy",
+			tree: func(t *testing.T) *folderyMcFolderFace {
+				ctx, flush := tester.NewContext(t)
+				defer flush()
+
+				tree := treeWithRoot(t)
+				err := tree.setFolder(ctx, rootID, id(pkg), name(pkg), true)
+				require.NoError(t, err, clues.ToCore(err))
+
+				err = tree.setFolder(ctx, id(pkg), id(folder), name(folder), false)
+				require.NoError(t, err, clues.ToCore(err))
+
+				return tree
+			},
+			expectErr: require.NoError,
+			expect: map[string]collectable{
+				rootID: {
+					currPath:                  fullPathPath(t),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  rootID,
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{},
+				},
+				id(pkg): {
+					currPath:                  fullPathPath(t, name(pkg)),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  id(pkg),
+					isPackageOrChildOfPackage: true,
+					loc:                       path.Elements{rootName},
+				},
+				id(folder): {
+					currPath:                  fullPathPath(t, name(pkg), name(folder)),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  id(folder),
+					isPackageOrChildOfPackage: true,
+					loc:                       path.Elements{rootName, name(pkg)},
+				},
+			},
+		},
+		{
+			name:      "folder hierarchy with previous paths",
+			tree:      treeWithFileInFolder,
+			expectErr: require.NoError,
+			prevPaths: map[string]string{
+				rootID:                fullPath(),
+				idx(folder, "parent"): fullPath(namex(folder, "parent-prev")),
+				id(folder):            fullPath(namex(folder, "parent-prev"), name(folder)),
+			},
+			expect: map[string]collectable{
+				rootID: {
+					currPath:                  fullPathPath(t),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  rootID,
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{},
+					prevPath:                  fullPathPath(t),
+				},
+				idx(folder, "parent"): {
+					currPath:                  fullPathPath(t, namex(folder, "parent")),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  idx(folder, "parent"),
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{rootName},
+					prevPath:                  fullPathPath(t, namex(folder, "parent-prev")),
+				},
+				id(folder): {
+					currPath:                  fullPathPath(t, namex(folder, "parent"), name(folder)),
+					folderID:                  id(folder),
+					isPackageOrChildOfPackage: false,
+					files: map[string]*custom.DriveItem{
+						id(file): custom.ToCustomDriveItem(fileAt("parent")),
+					},
+					loc:      path.Elements{rootName, namex(folder, "parent")},
+					prevPath: fullPathPath(t, namex(folder, "parent-prev"), name(folder)),
+				},
+			},
+		},
+		{
+			name: "root and tombstones",
+			tree: treeWithFileInTombstone,
+			prevPaths: map[string]string{
+				rootID:     fullPath(),
+				id(folder): fullPath(name(folder)),
+			},
+			expectErr: require.NoError,
+			expect: map[string]collectable{
+				rootID: {
+					currPath:                  fullPathPath(t),
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  rootID,
+					isPackageOrChildOfPackage: false,
+					loc:                       path.Elements{},
+					prevPath:                  fullPathPath(t),
+				},
+				id(folder): {
+					files:                     map[string]*custom.DriveItem{},
+					folderID:                  id(folder),
+					isPackageOrChildOfPackage: false,
+					prevPath:                  fullPathPath(t, name(folder)),
+				},
+			},
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+			tree := test.tree(t)
+
+			if len(test.prevPaths) > 0 {
+				for id, ps := range test.prevPaths {
+					pp, err := path.FromDataLayerPath(ps, false)
+					require.NoError(t, err, clues.ToCore(err))
+
+					err = tree.setPreviousPath(id, pp)
+					require.NoError(t, err, clues.ToCore(err))
+				}
+			}
+
+			results, err := tree.generateCollectables()
+			test.expectErr(t, err, clues.ToCore(err))
+			assert.Len(t, results, len(test.expect))
+
+			for id, expect := range test.expect {
+				require.Contains(t, results, id)
+
+				result := results[id]
+				assert.Equal(t, id, result.folderID)
+
+				if expect.currPath == nil {
+					assert.Nil(t, result.currPath)
+				} else {
+					assert.Equal(t, expect.currPath.String(), result.currPath.String())
+				}
+
+				if expect.prevPath == nil {
+					assert.Nil(t, result.prevPath)
+				} else {
+					assert.Equal(t, expect.prevPath.String(), result.prevPath.String())
+				}
+
+				if expect.loc == nil {
+					assert.Nil(t, result.loc)
+				} else {
+					assert.Equal(t, expect.loc.PlainString(), result.loc.PlainString())
+				}
+
+				assert.ElementsMatch(t, maps.Keys(expect.files), maps.Keys(result.files))
+			}
+		})
+	}
+}
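
The exclude-map expectations above lean on a `makeExcludeMap` helper defined elsewhere in this test file. Judging from the `map[string]struct{}` expectations, its shape is presumably something like this (a sketch, not code from the diff):

```go
// collapses item IDs into a set keyed by ID; an empty call yields an
// empty (non-nil) map, matching the map[string]struct{}{} cases above.
func makeExcludeMap(ids ...string) map[string]struct{} {
	m := map[string]struct{}{}
	for _, id := range ids {
		m[id] = struct{}{}
	}

	return m
}
```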
@@ -19,6 +19,7 @@ import (
 	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
 	"github.com/alcionai/corso/src/internal/m365/support"
+	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	bupMD "github.com/alcionai/corso/src/pkg/backup/metadata"
@@ -30,6 +31,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 	"github.com/alcionai/corso/src/pkg/services/m365/api/graph"
 	apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
+	"github.com/alcionai/corso/src/pkg/services/m365/custom"
 )
 
 const defaultItemSize int64 = 42
@@ -152,6 +154,7 @@ func coreItem(
 	item := models.NewDriveItem()
 	item.SetName(&name)
 	item.SetId(&id)
+	item.SetLastModifiedDateTime(ptr.To(time.Now()))
 
 	parentReference := models.NewItemReference()
 	parentReference.SetPath(&parentPath)
@@ -178,6 +181,21 @@ func driveItem(
 	return coreItem(id, name, parentPath, parentID, it)
 }
 
+func driveFile(
+	idX any,
+	parentPath, parentID string,
+) models.DriveItemable {
+	i := id(file)
+	n := name(file)
+
+	if idX != file {
+		i = idx(file, idX)
+		n = namex(file, idX)
+	}
+
+	return driveItem(i, n, parentPath, parentID, isFile)
+}
+
 func fileAtRoot() models.DriveItemable {
 	return driveItem(id(file), name(file), parentDir(), rootID, isFile)
 }
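
Usage sketch for the new `driveFile` helper (a fragment assuming this test package's helpers; the suffixed call appears verbatim in the `fullTreeWithNames` generator later in this diff): passing the `file` token yields the default `id(file)`/`name(file)` item, while any other value produces the `idx`/`namex` suffixed variant.

```go
// default item: id(file) / name(file), parented at the drive root
df := driveFile(file, parentDir(), rootID)

// suffixed item: idx(file, "r") / namex(file, "r")
dfR := driveFile("r", parentDir(), rootID)
```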
@@ -444,6 +462,13 @@ func fullPath(elems ...string) string {
 		elems...)...)
 }
 
+func fullPathPath(t *testing.T, elems ...string) path.Path {
+	p, err := path.FromDataLayerPath(fullPath(elems...), false)
+	require.NoError(t, err, clues.ToCore(err))
+
+	return p
+}
+
 func driveFullPath(driveID any, elems ...string) string {
 	return toPath(append(
 		[]string{
@@ -468,12 +493,6 @@ func driveParentDir(driveID any, elems ...string) string {
 		elems...)...)
 }
 
-// just for readability
-const (
-	doMergeItems    = true
-	doNotMergeItems = false
-)
-
 // common item names
 const (
 	bar = "bar"
@@ -564,26 +583,6 @@ func collWithMBHAndOpts(
 		count.New())
 }
 
-// func fullOrPrevPath(
-// 	t *testing.T,
-// 	coll data.BackupCollection,
-// ) path.Path {
-// 	var collPath path.Path
-
-// 	if coll.State() != data.DeletedState {
-// 		collPath = coll.FullPath()
-// 	} else {
-// 		collPath = coll.PreviousPath()
-// 	}
-
-// 	require.False(
-// 		t,
-// 		len(collPath.Elements()) < 4,
-// 		"malformed or missing collection path")
-
-// 	return collPath
-// }
-
 func pagerForDrives(drives ...models.Driveable) *apiMock.Pager[models.Driveable] {
 	return &apiMock.Pager[models.Driveable]{
 		ToReturn: []apiMock.PagerResult[models.Driveable]{
@@ -592,6 +591,30 @@ func pagerForDrives(drives ...models.Driveable) *apiMock.Pager[models.Driveable]
 	}
 }
 
+func aPage(items ...models.DriveItemable) mock.NextPage {
+	return mock.NextPage{
+		Items: append([]models.DriveItemable{driveRootItem()}, items...),
+	}
+}
+
+func aPageWReset(items ...models.DriveItemable) mock.NextPage {
+	return mock.NextPage{
+		Items: append([]models.DriveItemable{driveRootItem()}, items...),
+		Reset: true,
+	}
+}
+
+func aReset(items ...models.DriveItemable) mock.NextPage {
+	return mock.NextPage{
+		Items: []models.DriveItemable{},
+		Reset: true,
+	}
+}
+
+// ---------------------------------------------------------------------------
+// metadata
+// ---------------------------------------------------------------------------
+
 func makePrevMetadataColls(
 	t *testing.T,
 	mbh BackupHandler,
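
The `aPage`/`aPageWReset`/`aReset` helpers above standardize mocked delta pages: every non-reset page leads with the drive root item, and `Reset: true` marks the point where previously enumerated results must be discarded. A self-contained sketch of the same idea (stand-in types, not `mock.NextPage`):

```go
package main

import "fmt"

// nextPage is a hypothetical stand-in for the test mock's page type.
type nextPage struct {
	items []string
	reset bool
}

// aPage mirrors the helper: pages always lead with the root item.
func aPage(items ...string) nextPage {
	return nextPage{items: append([]string{"root"}, items...)}
}

// aReset mirrors the helper: an empty page that signals a delta reset.
func aReset() nextPage {
	return nextPage{items: []string{}, reset: true}
}

func main() {
	// an enumeration interrupted by a reset, then replayed in full
	pages := []nextPage{aPage("file-1"), aReset(), aPage("file-1", "file-2")}
	for i, p := range pages {
		fmt.Printf("page %d: reset=%v items=%v\n", i, p.reset, p.items)
	}
}
```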
@@ -644,133 +667,150 @@ func makePrevMetadataColls(
 // 	assert.Equal(t, expectPrevPaths, prevs, "previous paths")
 // }
 
-// for comparisons done by collection state
-type stateAssertion struct {
-	itemIDs []string
-	// should never get set by the user.
-	// this flag gets flipped when calling assertions.compare.
-	// any unseen collection will error on requireNoUnseenCollections
-	// sawCollection bool
-}
+// ---------------------------------------------------------------------------
+// collections
+// ---------------------------------------------------------------------------
 
 // for comparisons done by a given collection path
 type collectionAssertion struct {
-	doNotMerge    assert.BoolAssertionFunc
-	states        map[data.CollectionState]*stateAssertion
-	excludedItems map[string]struct{}
+	curr    path.Path
+	prev    path.Path
+	state   data.CollectionState
+	fileIDs []string
+	// should never get set by the user.
+	// this flag gets flipped when calling assertions.compare.
+	// any unseen collection will error on requireNoUnseenCollections
+	sawCollection bool
 }
 
-type statesToItemIDs map[data.CollectionState][]string
+func aColl(
+	curr, prev path.Path,
+	fileIDs ...string,
+) *collectionAssertion {
+	ids := make([]string, 0, 2*len(fileIDs))
 
-// TODO(keepers): move excludeItems to a more global position.
-func newCollAssertion(
-	doNotMerge bool,
-	itemsByState statesToItemIDs,
-	excludeItems ...string,
-) collectionAssertion {
-	states := map[data.CollectionState]*stateAssertion{}
-
-	for state, itemIDs := range itemsByState {
-		states[state] = &stateAssertion{
-			itemIDs: itemIDs,
-		}
+	for _, fUD := range fileIDs {
+		ids = append(ids, fUD+metadata.DataFileSuffix)
+		ids = append(ids, fUD+metadata.MetaFileSuffix)
 	}
 
-	dnm := assert.False
-	if doNotMerge {
-		dnm = assert.True
-	}
-
-	return collectionAssertion{
-		doNotMerge:    dnm,
-		states:        states,
-		excludedItems: makeExcludeMap(excludeItems...),
+	return &collectionAssertion{
+		curr:    curr,
+		prev:    prev,
+		state:   data.StateOf(prev, curr, count.New()),
+		fileIDs: ids,
 	}
 }
 
 // to aggregate all collection-related expectations in the backup
 // map collection path -> collection state -> assertion
-type collectionAssertions map[string]collectionAssertion
+type expectedCollections struct {
+	assertions  map[string]*collectionAssertion
+	doNotMerge  assert.BoolAssertionFunc
+	hasURLCache assert.ValueAssertionFunc
+}
 
-// ensure the provided collection matches expectations as set by the test.
-// func (cas collectionAssertions) compare(
-// 	t *testing.T,
-// 	coll data.BackupCollection,
-// 	excludes *prefixmatcher.StringSetMatchBuilder,
-// ) {
-// 	ctx, flush := tester.NewContext(t)
-// 	defer flush()
+func expectCollections(
+	doNotMerge bool,
+	hasURLCache bool,
+	colls ...*collectionAssertion,
+) expectedCollections {
+	as := map[string]*collectionAssertion{}
 
-// 	var (
-// 		itemCh  = coll.Items(ctx, fault.New(true))
-// 		itemIDs = []string{}
-// 	)
+	for _, coll := range colls {
+		as[expectFullOrPrev(coll).String()] = coll
+	}
 
-// 	p := fullOrPrevPath(t, coll)
+	dontMerge := assert.False
+	if doNotMerge {
+		dontMerge = assert.True
+	}
 
-// 	for itm := range itemCh {
-// 		itemIDs = append(itemIDs, itm.ID())
-// 	}
+	hasCache := assert.Nil
+	if hasURLCache {
+		hasCache = assert.NotNil
+	}
 
-// 	expect := cas[p.String()]
-// 	expectState := expect.states[coll.State()]
-// 	expectState.sawCollection = true
+	return expectedCollections{
+		assertions:  as,
+		doNotMerge:  dontMerge,
+		hasURLCache: hasCache,
+	}
+}
 
-// 	assert.ElementsMatchf(
-// 		t,
-// 		expectState.itemIDs,
-// 		itemIDs,
-// 		"expected all items to match in collection with:\nstate %q\npath %q",
-// 		coll.State(),
-// 		p)
+func (ecs expectedCollections) compare(
+	t *testing.T,
+	colls []data.BackupCollection,
+) {
+	for _, coll := range colls {
+		ecs.compareColl(t, coll)
+	}
+}
 
-// 	expect.doNotMerge(
-// 		t,
-// 		coll.DoNotMergeItems(),
-// 		"expected collection to have the appropariate doNotMerge flag")
+func (ecs expectedCollections) compareColl(t *testing.T, coll data.BackupCollection) {
+	ctx, flush := tester.NewContext(t)
+	defer flush()
 
-// 	if result, ok := excludes.Get(p.String()); ok {
-// 		assert.Equal(
-// 			t,
-// 			expect.excludedItems,
-// 			result,
-// 			"excluded items")
-// 	}
-// }
+	var (
+		itemIDs = []string{}
+		p       = fullOrPrevPath(t, coll)
+	)
+
+	if coll.State() != data.DeletedState {
+		for itm := range coll.Items(ctx, fault.New(true)) {
+			itemIDs = append(itemIDs, itm.ID())
+		}
+	}
+
+	expect := ecs.assertions[p.String()]
+	require.NotNil(
+		t,
+		expect,
+		"test should have an expected entry for collection with:\n\tstate %q\n\tpath %q",
+		coll.State(),
+		p)
+
+	expect.sawCollection = true
+
+	assert.ElementsMatchf(
+		t,
+		expect.fileIDs,
+		itemIDs,
+		"expected all items to match in collection with:\n\tstate %q\n\tpath %q",
+		coll.State(),
+		p)
+
+	if expect.prev == nil {
+		assert.Nil(t, coll.PreviousPath(), "previous path")
+	} else {
+		assert.Equal(t, expect.prev, coll.PreviousPath())
+	}
+
+	if expect.curr == nil {
+		assert.Nil(t, coll.FullPath(), "collection path")
+	} else {
+		assert.Equal(t, expect.curr, coll.FullPath())
+	}
+
+	ecs.doNotMerge(
+		t,
+		coll.DoNotMergeItems(),
+		"expected collection to have the appropariate doNotMerge flag")
+
+	driveColl := coll.(*Collection)
+
+	ecs.hasURLCache(t, driveColl.urlCache, "has a populated url cache handler")
+}
 
 // ensure that no collections in the expected set are still flagged
 // as sawCollection == false.
-// func (cas collectionAssertions) requireNoUnseenCollections(
-// 	t *testing.T,
-// ) {
-// 	for p, withPath := range cas {
-// 		for _, state := range withPath.states {
-// 			require.True(
-// 				t,
-// 				state.sawCollection,
-// 				"results should have contained collection:\n\t%q\t\n%q",
-// 				state, p)
-// 		}
-// 	}
-// }
-
-func aPage(items ...models.DriveItemable) mock.NextPage {
-	return mock.NextPage{
-		Items: append([]models.DriveItemable{driveRootItem()}, items...),
-	}
-}
-
-func aPageWReset(items ...models.DriveItemable) mock.NextPage {
-	return mock.NextPage{
-		Items: append([]models.DriveItemable{driveRootItem()}, items...),
-		Reset: true,
-	}
-}
-
-func aReset(items ...models.DriveItemable) mock.NextPage {
-	return mock.NextPage{
-		Items: []models.DriveItemable{},
-		Reset: true,
+func (ecs expectedCollections) requireNoUnseenCollections(t *testing.T) {
+	for _, ca := range ecs.assertions {
+		require.True(
+			t,
+			ca.sawCollection,
+			"results did not include collection at:\n\tstate %q\t\npath %q",
+			ca.state, expectFullOrPrev(ca))
 	}
 }
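
Putting the new assertion helpers together, a usage fragment (assumes the helpers above plus a `results` slice of produced collections; illustrative, not part of the diff):

```go
// declare expected collections by path, then compare the whole batch
expect := expectCollections(
	false, // doNotMerge
	true,  // hasURLCache
	aColl(fullPathPath(t), nil, id(file)),      // new collection holding one file
	aColl(nil, fullPathPath(t, name(folder)))) // deleted: curr is nil, prev set

expect.compare(t, results)
expect.requireNoUnseenCollections(t)
```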
@@ -778,10 +818,33 @@ func aReset(items ...models.DriveItemable) mock.NextPage {
 // delta trees
 // ---------------------------------------------------------------------------
 
-var loc = path.NewElements("root:/foo/bar/baz/qux/fnords/smarf/voi/zumba/bangles/howdyhowdyhowdy")
+func defaultTreePfx(t *testing.T) path.Path {
+	fpb := fullPathPath(t).ToBuilder()
+	fpe := fpb.Elements()
+	fpe = fpe[:len(fpe)-1]
+	fpb = path.Builder{}.Append(fpe...)
 
-func treeWithRoot() *folderyMcFolderFace {
-	tree := newFolderyMcFolderFace(nil, rootID)
+	p, err := path.FromDataLayerPath(fpb.String(), false)
+	require.NoErrorf(
+		t,
+		err,
+		"err processing path:\n\terr %+v\n\tpath %q",
+		clues.ToCore(err),
+		fpb)
+
+	return p
+}
+
+func defaultLoc() path.Elements {
+	return path.NewElements("root:/foo/bar/baz/qux/fnords/smarf/voi/zumba/bangles/howdyhowdyhowdy")
+}
+
+func newTree(t *testing.T) *folderyMcFolderFace {
+	return newFolderyMcFolderFace(defaultTreePfx(t), rootID)
+}
+
+func treeWithRoot(t *testing.T) *folderyMcFolderFace {
+	tree := newFolderyMcFolderFace(defaultTreePfx(t), rootID)
 	rootey := newNodeyMcNodeFace(nil, rootID, rootName, false)
 	tree.root = rootey
 	tree.folderIDToNode[rootID] = rootey
@ -789,29 +852,29 @@ func treeWithRoot() *folderyMcFolderFace {
|
|||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func treeAfterReset() *folderyMcFolderFace {
|
func treeAfterReset(t *testing.T) *folderyMcFolderFace {
|
||||||
tree := newFolderyMcFolderFace(nil, rootID)
|
tree := newFolderyMcFolderFace(defaultTreePfx(t), rootID)
|
||||||
tree.reset()
|
tree.reset()
|
||||||
|
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func treeWithFoldersAfterReset() *folderyMcFolderFace {
|
func treeWithFoldersAfterReset(t *testing.T) *folderyMcFolderFace {
|
||||||
tree := treeWithFolders()
|
tree := treeWithFolders(t)
|
||||||
tree.hadReset = true
|
tree.hadReset = true
|
||||||
|
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func treeWithTombstone() *folderyMcFolderFace {
|
func treeWithTombstone(t *testing.T) *folderyMcFolderFace {
|
||||||
tree := treeWithRoot()
|
tree := treeWithRoot(t)
|
||||||
tree.tombstones[id(folder)] = newNodeyMcNodeFace(nil, id(folder), "", false)
|
tree.tombstones[id(folder)] = newNodeyMcNodeFace(nil, id(folder), "", false)
|
||||||
|
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func treeWithFolders() *folderyMcFolderFace {
|
func treeWithFolders(t *testing.T) *folderyMcFolderFace {
|
||||||
tree := treeWithRoot()
|
tree := treeWithRoot(t)
|
||||||
|
|
||||||
parent := newNodeyMcNodeFace(tree.root, idx(folder, "parent"), namex(folder, "parent"), true)
|
parent := newNodeyMcNodeFace(tree.root, idx(folder, "parent"), namex(folder, "parent"), true)
|
||||||
tree.folderIDToNode[parent.id] = parent
|
tree.folderIDToNode[parent.id] = parent
|
||||||
@ -824,35 +887,146 @@ func treeWithFolders() *folderyMcFolderFace {
|
|||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func treeWithFileAtRoot() *folderyMcFolderFace {
|
func treeWithFileAtRoot(t *testing.T) *folderyMcFolderFace {
|
||||||
tree := treeWithRoot()
|
tree := treeWithRoot(t)
|
||||||
tree.root.files[id(file)] = fileyMcFileFace{
|
tree.root.files[id(file)] = custom.ToCustomDriveItem(fileAtRoot())
|
||||||
lastModified: time.Now(),
|
|
||||||
contentSize: 42,
|
|
||||||
}
|
|
||||||
tree.fileIDToParentID[id(file)] = rootID
|
tree.fileIDToParentID[id(file)] = rootID
|
||||||
|
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func treeWithFileInFolder() *folderyMcFolderFace {
|
func treeWithDeletedFile(t *testing.T) *folderyMcFolderFace {
|
||||||
tree := treeWithFolders()
|
tree := treeWithRoot(t)
|
||||||
tree.folderIDToNode[id(folder)].files[id(file)] = fileyMcFileFace{
|
tree.deleteFile(idx(file, "d"))
|
||||||
lastModified: time.Now(),
|
|
||||||
contentSize: 42,
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func treeWithFileInFolder(t *testing.T) *folderyMcFolderFace {
|
||||||
|
tree := treeWithFolders(t)
|
||||||
|
tree.folderIDToNode[id(folder)].files[id(file)] = custom.ToCustomDriveItem(fileAt(folder))
|
||||||
tree.fileIDToParentID[id(file)] = id(folder)
|
tree.fileIDToParentID[id(file)] = id(folder)
|
||||||
|
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
func treeWithFileInTombstone() *folderyMcFolderFace {
|
func treeWithFileInTombstone(t *testing.T) *folderyMcFolderFace {
|
||||||
tree := treeWithTombstone()
|
tree := treeWithTombstone(t)
|
||||||
tree.tombstones[id(folder)].files[id(file)] = fileyMcFileFace{
|
tree.tombstones[id(folder)].files[id(file)] = custom.ToCustomDriveItem(fileAt("tombstone"))
|
||||||
lastModified: time.Now(),
|
|
||||||
contentSize: 42,
|
|
||||||
}
|
|
||||||
tree.fileIDToParentID[id(file)] = id(folder)
|
tree.fileIDToParentID[id(file)] = id(folder)
|
||||||
|
|
||||||
return tree
|
return tree
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// root -> idx(folder, parent) -> id(folder)
|
||||||
|
// one item at each dir
|
||||||
|
// one tombstone: idx(folder, tombstone)
|
||||||
|
// one item in the tombstone
|
||||||
|
// one deleted item
|
||||||
|
func fullTree(t *testing.T) *folderyMcFolderFace {
|
||||||
|
return fullTreeWithNames("parent", "tombstone")(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fullTreeWithNames(
|
||||||
|
parentFolderX, tombstoneX any,
|
||||||
|
) func(t *testing.T) *folderyMcFolderFace {
|
||||||
|
return func(t *testing.T) *folderyMcFolderFace {
|
||||||
|
ctx, flush := tester.NewContext(t)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
tree := treeWithRoot(t)
|
||||||
|
|
||||||
|
// file in root
|
||||||
|
df := driveFile("r", parentDir(), rootID)
|
||||||
|
err := tree.addFile(
|
||||||
|
rootID,
|
||||||
|
idx(file, "r"),
|
||||||
|
custom.ToCustomDriveItem(df))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// root -> idx(folder, parent)
|
||||||
|
err = tree.setFolder(ctx, rootID, idx(folder, parentFolderX), namex(folder, parentFolderX), false)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// file in idx(folder, parent)
|
||||||
|
df = driveFile("p", parentDir(namex(folder, parentFolderX)), idx(folder, parentFolderX))
|
||||||
|
err = tree.addFile(
|
||||||
|
idx(folder, parentFolderX),
|
||||||
|
idx(file, "p"),
|
||||||
|
custom.ToCustomDriveItem(df))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// idx(folder, parent) -> id(folder)
|
||||||
|
err = tree.setFolder(ctx, idx(folder, parentFolderX), id(folder), name(folder), false)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// file in id(folder)
|
||||||
|
df = driveFile(file, parentDir(name(folder)), id(folder))
|
||||||
|
err = tree.addFile(
|
||||||
|
id(folder),
|
||||||
|
id(file),
|
||||||
|
custom.ToCustomDriveItem(df))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// tombstone - have to set a non-tombstone folder first, then add the item, then tombstone the folder
|
||||||
|
err = tree.setFolder(ctx, rootID, idx(folder, tombstoneX), namex(folder, tombstoneX), false)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// file in tombstone
|
||||||
|
df = driveFile("t", parentDir(namex(folder, tombstoneX)), idx(folder, tombstoneX))
|
||||||
|
err = tree.addFile(
|
||||||
|
idx(folder, tombstoneX),
|
||||||
|
idx(file, "t"),
|
||||||
|
custom.ToCustomDriveItem(df))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
err = tree.setTombstone(ctx, idx(folder, tombstoneX))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// deleted file
|
||||||
|
tree.deleteFile(idx(file, "d"))
|
||||||
|
|
||||||
|
return tree
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// misc
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
func expectFullOrPrev(ca *collectionAssertion) path.Path {
|
||||||
|
var p path.Path
|
||||||
|
|
||||||
|
if ca.state != data.DeletedState {
|
||||||
|
p = ca.curr
|
||||||
|
} else {
|
||||||
|
p = ca.prev
|
||||||
|
}
|
||||||
|
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func fullOrPrevPath(
|
||||||
|
t *testing.T,
|
||||||
|
coll data.BackupCollection,
|
||||||
|
) path.Path {
|
||||||
|
var collPath path.Path
|
||||||
|
|
||||||
|
if coll.State() == data.DeletedState {
|
||||||
|
collPath = coll.PreviousPath()
|
||||||
|
} else {
|
||||||
|
collPath = coll.FullPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NotNil(
|
||||||
|
t,
|
||||||
|
collPath,
|
||||||
|
"full or prev path are nil for collection with state:\n\t%s",
|
||||||
|
coll.State())
|
||||||
|
|
||||||
|
require.False(
|
||||||
|
t,
|
||||||
|
len(collPath.Elements()) < 4,
|
||||||
|
"malformed or missing collection path")
|
||||||
|
|
||||||
|
return collPath
|
||||||
|
}
|
||||||
|
|||||||
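
For orientation, since the exclude-map test earlier keys off the `fullTree` fixture above: live files land in the exclude map, the tombstoned file does not, and the deleted file does. A derived summary of the fixture (editorial note, not code from the diff):

```go
// fullTree fixture shape (ids abbreviated):
//
//	root                       -> idx(file, "r")  // live: excluded
//	└─ idx(folder, "parent")   -> idx(file, "p")  // live: excluded
//	   └─ id(folder)           -> id(file)        // live: excluded
//	tombstone idx(folder, ...) -> idx(file, "t")  // tombstoned: not excluded
//	deleted                    -> idx(file, "d")  // deleted: excluded
```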
@@ -19,7 +19,10 @@ import (
 
 const (
 	urlCacheDriveItemThreshold = 300 * 1000
-	urlCacheRefreshInterval    = 1 * time.Hour
+	// 600 pages = 300k items, since delta enumeration produces 500 items per page
+	// TODO: export standard page size and swap to 300k/defaultDeltaPageSize
+	urlCacheDrivePagesThreshold = 600
+	urlCacheRefreshInterval     = 1 * time.Hour
 )
 
 type getItemPropertyer interface {
@@ -92,6 +92,7 @@ const (
 	TotalDeltasProcessed   Key = "total-deltas-processed"
 	TotalFilesProcessed    Key = "total-files-processed"
 	TotalFoldersProcessed  Key = "total-folders-processed"
+	TotalItemsProcessed    Key = "total-items-processed"
 	TotalMalwareProcessed  Key = "total-malware-processed"
 	TotalPackagesProcessed Key = "total-packages-processed"
 	TotalPagesEnumerated   Key = "total-pages-enumerated"