add test.t and pfx to test tree generators (#4777)
This is needed to standardize the presence of a path prefix in all test-helper trees, so that we can use standard test factory helpers for producing complete post-process data.

---

#### Does this PR need a docs update or release note?

- [x] ⛔ No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Issue(s)

* #4689

#### Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
This commit is contained in: parent f62760f65a · commit b94e5a677d
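The test-helper change at the heart of this PR swaps pre-built tree values for generator funcs that take a `*testing.T` and always carry a real path prefix. A minimal sketch of the pattern; the constructor call here is illustrative, and the actual signatures live in the tree helpers in the diff below:

```go
// Hedged sketch: treeWithRoot as a generator. fullPathPath is the helper
// added in this PR; the constructor arguments shown are assumptions.
func treeWithRoot(t *testing.T) *folderyMcFolderFace {
	// every generated tree now carries a concrete, parseable path prefix,
	// so post-processing can produce complete collection paths.
	pfx := fullPathPath(t)
	tree := newFolderyMcFolderFace(pfx, rootID)

	// ...root node setup elided...

	return tree
}
```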
@@ -292,11 +292,11 @@ func DeserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro
func (c *Collections) Get(
ctx context.Context,
prevMetadata []data.RestoreCollection,
ssmb *prefixmatcher.StringSetMatchBuilder,
globalExcludeItemIDs *prefixmatcher.StringSetMatchBuilder,
errs *fault.Bus,
) ([]data.BackupCollection, bool, error) {
if c.ctrl.ToggleFeatures.UseDeltaTree {
colls, canUsePrevBackup, err := c.getTree(ctx, prevMetadata, ssmb, errs)
colls, canUsePrevBackup, err := c.getTree(ctx, prevMetadata, globalExcludeItemIDs, errs)
if err != nil && !errors.Is(err, errGetTreeNotImplemented) {
return nil, false, clues.Wrap(err, "processing backup using tree")
}
@@ -457,7 +457,7 @@ func (c *Collections) Get(
return nil, false, clues.WrapWC(ictx, err, "making exclude prefix")
}

ssmb.Add(p.String(), excludedItemIDs)
globalExcludeItemIDs.Add(p.String(), excludedItemIDs)

continue
}

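The `ssmb` parameter is renamed to `globalExcludeItemIDs`, which states its job plainly: item IDs that post-processing must exclude from incremental merges, registered under the owning drive's path prefix. Only the `Add` side appears in this diff; the sketch below restates it with an illustrative payload:

```go
// Illustrative payload: one drive's excluded item IDs, keyed under that
// drive's canonical folder prefix p. The suffixed forms mirror how drive
// items are stored as .data/.meta sibling entries.
excludedItemIDs := map[string]struct{}{
	"itemID-1" + metadata.DataFileSuffix: {},
	"itemID-1" + metadata.MetaFileSuffix: {},
}

globalExcludeItemIDs.Add(p.String(), excludedItemIDs)
```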
@@ -2,6 +2,7 @@ package drive

import (
"context"
"fmt"

"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/models"
@@ -33,7 +34,7 @@ import (
func (c *Collections) getTree(
ctx context.Context,
prevMetadata []data.RestoreCollection,
ssmb *prefixmatcher.StringSetMatchBuilder,
globalExcludeItemIDsByDrivePrefix *prefixmatcher.StringSetMatchBuilder,
errs *fault.Bus,
) ([]data.BackupCollection, bool, error) {
ctx = clues.AddTraceName(ctx, "GetTree")
@@ -114,6 +115,7 @@ func (c *Collections) getTree(
prevPathsByDriveID[driveID],
deltasByDriveID[driveID],
limiter,
globalExcludeItemIDsByDrivePrefix,
cl,
el)
if err != nil {
@@ -168,15 +170,18 @@ func (c *Collections) makeDriveCollections(
prevPaths map[string]string,
prevDeltaLink string,
limiter *pagerLimiter,
globalExcludeItemIDsByDrivePrefix *prefixmatcher.StringSetMatchBuilder,
counter *count.Bus,
errs *fault.Bus,
) ([]data.BackupCollection, map[string]string, pagers.DeltaUpdate, error) {
ppfx, err := c.handler.PathPrefix(c.tenantID, ptr.Val(drv.GetId()))
driveID := ptr.Val(drv.GetId())

ppfx, err := c.handler.PathPrefix(c.tenantID, driveID)
if err != nil {
return nil, nil, pagers.DeltaUpdate{}, clues.Wrap(err, "generating backup tree prefix")
}

root, err := c.handler.GetRootFolder(ctx, ptr.Val(drv.GetId()))
root, err := c.handler.GetRootFolder(ctx, driveID)
if err != nil {
return nil, nil, pagers.DeltaUpdate{}, clues.Wrap(err, "getting root folder")
}
@@ -187,7 +192,7 @@ func (c *Collections) makeDriveCollections(

// --- delta item aggregation

du, err := c.populateTree(
du, countPagesInDelta, err := c.populateTree(
ctx,
tree,
drv,
@@ -199,74 +204,44 @@
return nil, nil, pagers.DeltaUpdate{}, clues.Stack(err)
}

// numDriveItems := c.NumItems - numPrevItems
// numPrevItems = c.NumItems

// cl.Add(count.NewPrevPaths, int64(len(newPrevPaths)))

// --- prev path incorporation

for folderID, p := range prevPaths {
// no check for errs.Failure here, despite the addRecoverable below.
// it's fine if we run through all of the collection generation even
// with failures present, and let the backup finish out.
prevPath, err := path.FromDataLayerPath(p, false)
err = addPrevPathsToTree(
ctx,
tree,
prevPaths,
errs)
if err != nil {
errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "invalid previous path").
With("folderID", folderID, "prev_path", p).
Label(fault.LabelForceNoBackupCreation))

continue
return nil, nil, pagers.DeltaUpdate{}, clues.Stack(err).Label(fault.LabelForceNoBackupCreation)
}

err = tree.setPreviousPath(folderID, prevPath)
if err != nil {
errs.AddRecoverable(ctx, clues.WrapWC(ctx, err, "setting previous path").
With("folderID", folderID, "prev_path", p).
Label(fault.LabelForceNoBackupCreation))

continue
}
}

// TODO(keepers): leaving this code around for now as a guide
// while implementation progresses.

// --- post-processing

// Attach an url cache to the drive if the number of discovered items is
// below the threshold. Attaching cache to larger drives can cause
// performance issues since cache delta queries start taking up majority of
// the hour the refreshed URLs are valid for.

// if numDriveItems < urlCacheDriveItemThreshold {
// logger.Ctx(ictx).Infow(
// "adding url cache for drive",
// "num_drive_items", numDriveItems)

// uc, err := newURLCache(
// driveID,
// prevDeltaLink,
// urlCacheRefreshInterval,
// c.handler,
// cl,
// errs)
// if err != nil {
// return nil, false, clues.Stack(err)
// }

// // Set the URL cache instance for all collections in this drive.
// for id := range c.CollectionMap[driveID] {
// c.CollectionMap[driveID][id].urlCache = uc
// }
// }

// this is a dumb hack to satisfy the linter.
if ctx == nil {
return nil, nil, du, nil
collections, newPrevs, excludedItemIDs, err := c.turnTreeIntoCollections(
ctx,
tree,
driveID,
prevDeltaLink,
countPagesInDelta,
errs)
if err != nil {
return nil, nil, pagers.DeltaUpdate{}, clues.Stack(err).Label(fault.LabelForceNoBackupCreation)
}

return nil, nil, du, errGetTreeNotImplemented
// only populate the global excluded items if no delta reset occurred.
// if a reset did occur, the collections should already be marked as
// "do not merge", therefore everything will get processed as a new addition.
if !tree.hadReset {
p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID)
if err != nil {
err = clues.WrapWC(ctx, err, "making canonical path for item exclusions")
return nil, nil, pagers.DeltaUpdate{}, err
}

globalExcludeItemIDsByDrivePrefix.Add(p.String(), excludedItemIDs)
}

return collections, newPrevs, du, nil
}

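For orientation, the reworked makeDriveCollections now reads as a four-step pipeline; the comment block below condenses the hunks above and adds no behavior:

```go
// 1. populateTree            — enumerate delta pages into the folder tree,
//                              also returning the page count for later use.
// 2. addPrevPathsToTree      — graft previous-backup paths onto tree nodes.
// 3. turnTreeIntoCollections — walk the tree into BackupCollections, new
//                              prev-paths, and merge-exclusion item IDs.
// 4. publish exclusions      — only when no delta reset occurred; a reset
//                              marks collections "do not merge", so item
//                              exclusions would have nothing to act on.
```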
// populateTree constructs a new tree and populates it with items
@@ -279,8 +254,8 @@ func (c *Collections) populateTree(
limiter *pagerLimiter,
counter *count.Bus,
errs *fault.Bus,
) (pagers.DeltaUpdate, error) {
ctx = clues.Add(ctx, "invalid_prev_delta", len(prevDeltaLink) == 0)
) (pagers.DeltaUpdate, int, error) {
ctx = clues.Add(ctx, "has_prev_delta", len(prevDeltaLink) > 0)

var (
currDeltaLink = prevDeltaLink
@@ -290,24 +265,49 @@
finished bool
hitLimit bool
// TODO: plug this into the limiter
maxDeltas = 100
countDeltas = 0
maximumTotalDeltasAllowed int64 = 100
// pageCounter is intended as a separate local instance
// compared to the counter we use for other item tracking.
// IE: don't pass it around into other funcs.
//
// This allows us to reset pageCounter on a reset without
// cross-contaminating other counts.
//
// We use this to track three keys: 1. the total number of
// deltas enumerated (so that we don't hit an infinite
// loop); 2. the number of pages in each delta (for the
// limiter, but also for the URL cache so that it knows
// if we have too many pages for it to efficiently operate);
// and 3. the number of items in each delta (to know if we're
// done enumerating delta queries).
pageCounter = counter.Local()
)

const (
// track the exact number of pages across all deltas (correct across resets)
// so that the url cache knows if it can operate within performance bounds.
truePageCount count.Key = "pages-with-items-across-all-deltas"
)

// enumerate through multiple deltas until we either:
// 1. hit a consistent state (ie: no changes since last delta enum)
// 2. hit the limit
// 2. hit the limit based on the limiter
// 3. run 100 total delta enumerations without hitting 1. (no infinite loops)
for !hitLimit && !finished && el.Failure() == nil {
counter.Inc(count.TotalDeltasProcessed)

var (
pageCount int
pageItemCount int
// this is used to track stats the total number of items
// processed in each delta. Since delta queries don't give
// us a plain flag for "no changes occurred", we check for
// 0 items in the delta as the "no changes occurred" state.
// The final page of any delta query may also return 0 items,
// so we need to combine both the item count and the deltaPageCount
// to get a correct flag.
iPageCounter = pageCounter.Local()
err error
)

countDeltas++

pager := c.handler.EnumerateDriveItemsDelta(
ctx,
driveID,
@@ -318,19 +318,22 @@

for page, reset, done := pager.NextPage(); !done; page, reset, done = pager.NextPage() {
if el.Failure() != nil {
return du, el.Failure()
return du, 0, el.Failure()
}

// track the exact number of pages within a single delta (correct across resets)
// so that we can check for "no changes occurred" results.
// Note: don't inc `count.TotalPagesEnumerated` outside of this (ie, for the
// truePageCount), or else we'll double up on the inc.
iPageCounter.Inc(count.TotalPagesEnumerated)

if reset {
counter.Inc(count.PagerResets)
tree.reset()
c.resetStats()

pageCount = 0
pageItemCount = 0
countDeltas = 0
} else {
counter.Inc(count.TotalPagesEnumerated)
pageCounter = counter.Local()
iPageCounter = pageCounter.Local()
}

err = c.enumeratePageOfItems(
@@ -350,14 +353,17 @@
el.AddRecoverable(ctx, clues.Stack(err))
}

pageCount++
itemCount := int64(len(page))
iPageCounter.Add(count.TotalItemsProcessed, itemCount)

pageItemCount += len(page)
if itemCount > 0 {
pageCounter.Inc(truePageCount)
}

// Stop enumeration early if we've reached the page limit. Keep this
// Stop enumeration early if we've reached the total page limit. Keep this
// at the end of the loop so we don't request another page (pager.NextPage)
// before seeing we've passed the limit.
if limiter.hitPageLimit(pageCount) {
if limiter.hitPageLimit(int(pageCounter.Get(truePageCount))) {
hitLimit = true
break
}
@@ -370,23 +376,32 @@

du, err = pager.Results()
if err != nil {
return du, clues.Stack(err)
return du, 0, clues.Stack(err)
}

currDeltaLink = du.URL

// 0 pages is never expected. We should at least have one (empty) page to
// consume. But checking pageCount == 1 is brittle in a non-helpful way.
finished = pageCount < 2 && pageItemCount == 0
finished = iPageCounter.Get(count.TotalPagesEnumerated) < 2 &&
iPageCounter.Get(count.TotalItemsProcessed) == 0

if countDeltas >= maxDeltas {
return pagers.DeltaUpdate{}, clues.New("unable to produce consistent delta after 100 queries")
// ensure we don't enumerate more than the maximum allotted count of deltas.
if counter.Get(count.TotalDeltasProcessed) >= maximumTotalDeltasAllowed {
err := clues.NewWC(
ctx,
fmt.Sprintf("unable to produce consistent delta after %d queries", maximumTotalDeltasAllowed))

return pagers.DeltaUpdate{}, 0, err
}
}

logger.Ctx(ctx).Infow("enumerated collection delta", "stats", counter.Values())
logger.Ctx(ctx).Infow(
"enumerated collection delta",
"stats", counter.Values(),
"delta_stats", pageCounter.Values())

return du, el.Failure()
return du, int(pageCounter.Get(truePageCount)), el.Failure()
}

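The loop's two exits are easy to misread, so here is the termination logic restated as a standalone predicate. This is a restatement of the code above, assuming the counters track what their names suggest:

```go
// deltaConverged mirrors the "finished" assignment above: a delta that
// returns a single page containing zero items reported no changes.
//   pages=1, items=0 -> true  (one empty page: consistent state reached)
//   pages=2, items=0 -> false (an empty *final* page is normal)
//   pages=1, items=5 -> false (changes were returned)
func deltaConverged(pagesInDelta, itemsInDelta int64) bool {
	return pagesInDelta < 2 && itemsInDelta == 0
}
```

Separately, `count.TotalDeltasProcessed` caps the loop at `maximumTotalDeltasAllowed` passes so a server that never converges can't spin forever.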
func (c *Collections) enumeratePageOfItems(
@@ -401,12 +416,13 @@ func (c *Collections) enumeratePageOfItems(
ctx = clues.Add(ctx, "page_lenth", len(page))
el := errs.Local()

for i, item := range page {
for i, driveItem := range page {
if el.Failure() != nil {
break
}

var (
item = custom.ToCustomDriveItem(driveItem)
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
isFile = item.GetFile() != nil
itemID = ptr.Val(item.GetId())
@@ -452,7 +468,7 @@ func (c *Collections) addFolderToTree(
ctx context.Context,
tree *folderyMcFolderFace,
drv models.Driveable,
folder models.DriveItemable,
folder *custom.DriveItem,
limiter *pagerLimiter,
counter *count.Bus,
) (*fault.Skipped, error) {
@@ -501,7 +517,7 @@ func (c *Collections) addFolderToTree(
driveID,
folderID,
folderName,
graph.ItemInfo(custom.ToCustomDriveItem(folder)))
graph.ItemInfo(folder))

logger.Ctx(ctx).Infow("malware folder detected")

@@ -533,7 +549,7 @@ func (c *Collections) addFolderToTree(
func (c *Collections) makeFolderCollectionPath(
ctx context.Context,
driveID string,
folder models.DriveItemable,
folder *custom.DriveItem,
) (path.Path, error) {
if folder.GetRoot() != nil {
pb := odConsts.DriveFolderPrefixBuilder(driveID)
@@ -565,7 +581,7 @@ func (c *Collections) addFileToTree(
ctx context.Context,
tree *folderyMcFolderFace,
drv models.Driveable,
file models.DriveItemable,
file *custom.DriveItem,
limiter *pagerLimiter,
counter *count.Bus,
) (*fault.Skipped, error) {
@@ -574,7 +590,6 @@ func (c *Collections) addFileToTree(
fileID = ptr.Val(file.GetId())
fileName = ptr.Val(file.GetName())
fileSize = ptr.Val(file.GetSize())
lastModified = ptr.Val(file.GetLastModifiedDateTime())
isDeleted = file.GetDeleted() != nil
isMalware = file.GetMalware() != nil
parent = file.GetParentReference()
@@ -602,7 +617,7 @@ func (c *Collections) addFileToTree(
driveID,
fileID,
fileName,
graph.ItemInfo(custom.ToCustomDriveItem(file)))
graph.ItemInfo(file))

logger.Ctx(ctx).Infow("malware file detected")

@@ -635,7 +650,7 @@ func (c *Collections) addFileToTree(
}
}

err := tree.addFile(parentID, fileID, lastModified, fileSize)
err := tree.addFile(parentID, fileID, file)
if err != nil {
return nil, clues.StackWC(ctx, err)
}
@@ -737,3 +752,121 @@ func (c *Collections) makeMetadataCollections(

return append(colls, md)
}

func addPrevPathsToTree(
ctx context.Context,
tree *folderyMcFolderFace,
prevPaths map[string]string,
errs *fault.Bus,
) error {
el := errs.Local()

for folderID, p := range prevPaths {
if el.Failure() != nil {
break
}

prevPath, err := path.FromDataLayerPath(p, false)
if err != nil {
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "invalid previous path").
With("folderID", folderID, "prev_path", p).
Label(count.BadPrevPath))

continue
}

err = tree.setPreviousPath(folderID, prevPath)
if err != nil {
el.AddRecoverable(ctx, clues.WrapWC(ctx, err, "setting previous path").
With("folderID", folderID, "prev_path", p))

continue
}
}

return el.Failure()
}

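A hedged usage sketch for addPrevPathsToTree, mirroring its call site in makeDriveCollections; the map contents here are placeholders:

```go
// prevPaths maps folderID -> previous data-layer path string, typically
// deserialized from the prior backup's metadata collections.
prevPaths := map[string]string{
	"folder-id-1": "placeholder/previous/location", // illustrative only
}

if err := addPrevPathsToTree(ctx, tree, prevPaths, errs); err != nil {
	// a hard failure here should block merge-based backup creation.
	return clues.Stack(err).Label(fault.LabelForceNoBackupCreation)
}
```

Note that per-folder problems are recoverable (the loop continues); only an accumulated local failure surfaces as an error.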
func (c *Collections) turnTreeIntoCollections(
ctx context.Context,
tree *folderyMcFolderFace,
driveID string,
prevDeltaLink string,
countPagesInDelta int,
errs *fault.Bus,
) (
[]data.BackupCollection,
map[string]string,
map[string]struct{},
error,
) {
collectables, err := tree.generateCollectables()
if err != nil {
err = clues.WrapWC(ctx, err, "generating backup collection data")
return nil, nil, nil, err
}

var (
collections = []data.BackupCollection{}
newPrevPaths = map[string]string{}
uc *urlCache
el = errs.Local()
)

// Attach an url cache to the drive if the number of discovered items is
// below the threshold. Attaching cache to larger drives can cause
// performance issues since cache delta queries start taking up majority of
// the hour the refreshed URLs are valid for.
if countPagesInDelta < urlCacheDriveItemThreshold {
logger.Ctx(ctx).Info("adding url cache for drive collections")

uc, err = newURLCache(
driveID,
// we need the original prevDeltaLink here; a cache update will need
// to process all changes since the start of the backup. On the bright
// side, instead of running multiple delta enumerations, all changes
// in the backup should get compressed into the single delta query, which
// ensures the two states are sufficiently consistent with just the
// original delta token.
prevDeltaLink,
urlCacheRefreshInterval,
c.handler,
c.counter.Local(),
errs)
if err != nil {
return nil, nil, nil, clues.StackWC(ctx, err)
}
}

for id, cbl := range collectables {
if el.Failure() != nil {
break
}

if cbl.currPath != nil {
newPrevPaths[id] = cbl.currPath.String()
}

coll, err := NewCollection(
c.handler,
c.protectedResource,
cbl.currPath,
cbl.prevPath,
driveID,
c.statusUpdater,
c.ctrl,
cbl.isPackageOrChildOfPackage,
tree.hadReset,
uc,
c.counter.Local())
if err != nil {
return nil, nil, nil, clues.StackWC(ctx, err)
}

coll.driveItems = cbl.files

collections = append(collections, coll)
}

return collections, newPrevPaths, tree.generateExcludeItemIDs(), el.Failure()
}

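Note the gate on the url cache: it only attaches when the delta stayed small, because refreshed download URLs are valid for roughly an hour and large drives would spend most of that window re-querying. A condensed restatement, under the caveat that the threshold constant's value lives elsewhere in the package:

```go
// condensed from turnTreeIntoCollections; the threshold name predates the
// switch from item counts to page counts (see the diff comment above).
var uc *urlCache
if countPagesInDelta < urlCacheDriveItemThreshold {
	uc, err = newURLCache(driveID, prevDeltaLink, urlCacheRefreshInterval, c.handler, c.counter.Local(), errs)
	// every collection built afterwards shares uc; nil means "no cache".
}
```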
(File diff suppressed because it is too large.)
@@ -2,12 +2,14 @@ package drive

import (
"context"
"time"

"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/custom"
)

// folderyMcFolderFace owns our delta processing tree.
@@ -86,7 +88,7 @@ type nodeyMcNodeFace struct {
// folderID -> node
children map[string]*nodeyMcNodeFace
// file item ID -> file metadata
files map[string]fileyMcFileFace
files map[string]*custom.DriveItem
// for special handling protocols around packages
isPackage bool
}
@@ -101,16 +103,11 @@ func newNodeyMcNodeFace(
id: id,
name: name,
children: map[string]*nodeyMcNodeFace{},
files: map[string]fileyMcFileFace{},
files: map[string]*custom.DriveItem{},
isPackage: isPackage,
}
}

type fileyMcFileFace struct {
lastModified time.Time
contentSize int64
}

// ---------------------------------------------------------------------------
// folder handling
// ---------------------------------------------------------------------------
@@ -317,8 +314,7 @@ func (face *folderyMcFolderFace) setPreviousPath(
// this func will update and/or clean up all the old references.
func (face *folderyMcFolderFace) addFile(
parentID, id string,
lastModified time.Time,
contentSize int64,
file *custom.DriveItem,
) error {
if len(parentID) == 0 {
return clues.New("item added without parent folder ID")
@@ -347,10 +343,7 @@ func (face *folderyMcFolderFace) addFile(
}

face.fileIDToParentID[id] = parentID
parent.files[id] = fileyMcFileFace{
lastModified: lastModified,
contentSize: contentSize,
}
parent.files[id] = file

delete(face.deletedFileIDs, id)

@@ -374,6 +367,114 @@ func (face *folderyMcFolderFace) deleteFile(id string) {
face.deletedFileIDs[id] = struct{}{}
}

// ---------------------------------------------------------------------------
// post-processing
// ---------------------------------------------------------------------------

type collectable struct {
currPath path.Path
files map[string]*custom.DriveItem
folderID string
isPackageOrChildOfPackage bool
loc path.Elements
prevPath path.Path
}

// produces a map of folderID -> collectable
func (face *folderyMcFolderFace) generateCollectables() (map[string]collectable, error) {
result := map[string]collectable{}
err := walkTreeAndBuildCollections(
face.root,
face.prefix,
&path.Builder{},
false,
result)

for id, tombstone := range face.tombstones {
// in case we got a folder deletion marker for a folder
// that has no previous path, drop the entry entirely.
// it doesn't exist in storage, so there's nothing to delete.
if tombstone.prev != nil {
result[id] = collectable{
folderID: id,
prevPath: tombstone.prev,
}
}
}

return result, clues.Stack(err).OrNil()
}

func walkTreeAndBuildCollections(
node *nodeyMcNodeFace,
pathPfx path.Path,
parentPath *path.Builder,
isChildOfPackage bool,
result map[string]collectable,
) error {
if node == nil {
return nil
}

parentLocation := parentPath.Elements()
currentLocation := parentPath.Append(node.name)

for _, child := range node.children {
err := walkTreeAndBuildCollections(
child,
pathPfx,
currentLocation,
node.isPackage || isChildOfPackage,
result)
if err != nil {
return err
}
}

collectionPath, err := pathPfx.Append(false, currentLocation.Elements()...)
if err != nil {
return clues.Wrap(err, "building collection path").
With(
"path_prefix", pathPfx,
"path_suffix", currentLocation.Elements())
}

cbl := collectable{
currPath: collectionPath,
files: node.files,
folderID: node.id,
isPackageOrChildOfPackage: node.isPackage || isChildOfPackage,
loc: parentLocation,
prevPath: node.prev,
}

result[node.id] = cbl

return nil
}

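Two properties of this walk are worth keeping in mind when reading the tests later in this PR: `loc` records the parent's location, not the node's own, and every collection path is the standardized prefix plus the accumulated folder names. A worked illustration, with hypothetical IDs:

```go
// Given root(name "root") -> folder(name "f"), and prefix P:
//   result["root-id"].currPath == P/root      loc == []        (parent of root)
//   result["f-id"].currPath    == P/root/f    loc == ["root"]  (parent of f)
// Children are visited before the parent's own entry is recorded, but the
// output is a map keyed by folderID, so visit order carries no meaning.
```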
func (face *folderyMcFolderFace) generateExcludeItemIDs() map[string]struct{} {
result := map[string]struct{}{}

for iID, pID := range face.fileIDToParentID {
if _, itsAlive := face.folderIDToNode[pID]; !itsAlive {
// don't worry about items whose parents are tombstoned.
// those will get handled in the delete cascade.
continue
}

result[iID+metadata.DataFileSuffix] = struct{}{}
result[iID+metadata.MetaFileSuffix] = struct{}{}
}

for iID := range face.deletedFileIDs {
result[iID+metadata.DataFileSuffix] = struct{}{}
result[iID+metadata.MetaFileSuffix] = struct{}{}
}

return result
}

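Each excluded file ID fans out into two entries, matching how drive items persist as content/metadata sibling files:

```go
// Illustration: with an alive-parent file "abc" and a deleted file "gone",
// generateExcludeItemIDs yields (suffixes per the metadata package):
//   abc.data, abc.meta, gone.data, gone.meta
// Files under tombstoned parents are skipped; the tombstone's delete
// cascade removes them wholesale.
```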
// ---------------------------------------------------------------------------
// quantification
// ---------------------------------------------------------------------------
@@ -414,7 +515,7 @@ func countFilesAndSizes(nodey *nodeyMcNodeFace) countAndSize {
}

for _, file := range nodey.files {
sumContentSize += file.contentSize
sumContentSize += ptr.Val(file.GetSize())
}

return countAndSize{

@@ -2,15 +2,17 @@ package drive

import (
"testing"
"time"

"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"golang.org/x/exp/maps"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/custom"
)

// ---------------------------------------------------------------------------
@@ -51,7 +53,7 @@ func (suite *DeltaTreeUnitSuite) TestNewNodeyMcNodeFace() {
assert.Equal(t, parent, nodeFace.parent)
assert.Equal(t, "id", nodeFace.id)
assert.Equal(t, "name", nodeFace.name)
assert.NotEqual(t, loc, nodeFace.prev)
assert.Nil(t, nodeFace.prev)
assert.True(t, nodeFace.isPackage)
assert.NotNil(t, nodeFace.children)
assert.NotNil(t, nodeFace.files)
@@ -66,7 +68,7 @@
func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder() {
table := []struct {
tname string
tree *folderyMcFolderFace
tree func(t *testing.T) *folderyMcFolderFace
parentID string
id string
name string
@@ -75,7 +77,7 @@
}{
{
tname: "add root",
tree: newFolderyMcFolderFace(nil, rootID),
tree: newTree,
id: rootID,
name: rootName,
isPackage: true,
@@ -83,14 +85,14 @@
},
{
tname: "root already exists",
tree: treeWithRoot(),
tree: treeWithRoot,
id: rootID,
name: rootName,
expectErr: assert.NoError,
},
{
tname: "add folder",
tree: treeWithRoot(),
tree: treeWithRoot,
parentID: rootID,
id: id(folder),
name: name(folder),
@@ -98,7 +100,7 @@
},
{
tname: "add package",
tree: treeWithRoot(),
tree: treeWithRoot,
parentID: rootID,
id: id(folder),
name: name(folder),
@@ -107,7 +109,7 @@
},
{
tname: "missing ID",
tree: treeWithRoot(),
tree: treeWithRoot,
parentID: rootID,
name: name(folder),
isPackage: true,
@@ -115,7 +117,7 @@
},
{
tname: "missing name",
tree: treeWithRoot(),
tree: treeWithRoot,
parentID: rootID,
id: id(folder),
isPackage: true,
@@ -123,7 +125,7 @@
},
{
tname: "missing parentID",
tree: treeWithRoot(),
tree: treeWithRoot,
id: id(folder),
name: name(folder),
isPackage: true,
@@ -131,7 +133,7 @@
},
{
tname: "already tombstoned",
tree: treeWithTombstone(),
tree: treeWithTombstone,
parentID: rootID,
id: id(folder),
name: name(folder),
@@ -139,8 +141,10 @@
},
{
tname: "add folder before parent",
tree: &folderyMcFolderFace{
tree: func(t *testing.T) *folderyMcFolderFace {
return &folderyMcFolderFace{
folderIDToNode: map[string]*nodeyMcNodeFace{},
}
},
parentID: rootID,
id: id(folder),
@@ -150,7 +154,7 @@
},
{
tname: "folder already exists",
tree: treeWithFolders(),
tree: treeWithFolders,
parentID: idx(folder, "parent"),
id: id(folder),
name: name(folder),
@@ -164,7 +168,9 @@
ctx, flush := tester.NewContext(t)
defer flush()

err := test.tree.setFolder(
tree := test.tree(t)

err := tree.setFolder(
ctx,
test.parentID,
test.id,
@@ -176,17 +182,17 @@
return
}

result := test.tree.folderIDToNode[test.id]
result := tree.folderIDToNode[test.id]
require.NotNil(t, result)
assert.Equal(t, test.id, result.id)
assert.Equal(t, test.name, result.name)
assert.Equal(t, test.isPackage, result.isPackage)

_, ded := test.tree.tombstones[test.id]
_, ded := tree.tombstones[test.id]
assert.False(t, ded)

if len(test.parentID) > 0 {
parent := test.tree.folderIDToNode[test.parentID]
parent := tree.folderIDToNode[test.parentID]
assert.Equal(t, parent, result.parent)
}
})
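The recurring edit in these tables — `tree: treeWithRoot()` becoming `tree: treeWithRoot`, with the field retyped to `func(t *testing.T) *folderyMcFolderFace` — defers construction to each subtest. A minimal sketch of why that matters:

```go
table := []struct {
	tname string
	tree  func(t *testing.T) *folderyMcFolderFace // factory, not a value
}{
	{tname: "add root", tree: treeWithRoot},
}

for _, test := range table {
	suite.Run(test.tname, func() {
		t := suite.T()
		// built per-case: no tree state leaks between subtests, and the
		// generator can call require.* (e.g. while parsing the prefix).
		tree := test.tree(t)
		_ = tree
	})
}
```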
@@ -197,36 +203,36 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddTombstone() {
table := []struct {
name string
id string
tree *folderyMcFolderFace
tree func(t *testing.T) *folderyMcFolderFace
expectErr assert.ErrorAssertionFunc
}{
{
name: "add tombstone",
id: id(folder),
tree: newFolderyMcFolderFace(nil, rootID),
tree: newTree,
expectErr: assert.NoError,
},
{
name: "duplicate tombstone",
id: id(folder),
tree: treeWithTombstone(),
tree: treeWithTombstone,
expectErr: assert.NoError,
},
{
name: "missing ID",
tree: newFolderyMcFolderFace(nil, rootID),
tree: newTree,
expectErr: assert.Error,
},
{
name: "conflict: folder alive",
id: id(folder),
tree: treeWithTombstone(),
tree: treeWithTombstone,
expectErr: assert.NoError,
},
{
name: "already tombstoned",
id: id(folder),
tree: treeWithTombstone(),
tree: treeWithTombstone,
expectErr: assert.NoError,
},
}
@@ -237,14 +243,16 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddTombstone() {
ctx, flush := tester.NewContext(t)
defer flush()

err := test.tree.setTombstone(ctx, test.id)
tree := test.tree(t)

err := tree.setTombstone(ctx, test.id)
test.expectErr(t, err, clues.ToCore(err))

if err != nil {
return
}

result := test.tree.tombstones[test.id]
result := tree.tombstones[test.id]
require.NotNil(t, result)
})
}
@@ -262,7 +270,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetPreviousPath() {
name string
id string
prev path.Path
tree *folderyMcFolderFace
tree func(t *testing.T) *folderyMcFolderFace
expectErr assert.ErrorAssertionFunc
expectLive bool
expectTombstone bool
@@ -270,8 +278,8 @@
{
name: "no changes become a no-op",
id: id(folder),
prev: pathWith(loc),
tree: newFolderyMcFolderFace(nil, rootID),
prev: pathWith(defaultLoc()),
tree: newTree,
expectErr: assert.NoError,
expectLive: false,
expectTombstone: false,
@@ -279,8 +287,8 @@
{
name: "added folders after reset",
id: id(folder),
prev: pathWith(loc),
tree: treeWithFoldersAfterReset(),
prev: pathWith(defaultLoc()),
tree: treeWithFoldersAfterReset,
expectErr: assert.NoError,
expectLive: true,
expectTombstone: false,
@@ -288,16 +296,16 @@
{
name: "create tombstone after reset",
id: id(folder),
prev: pathWith(loc),
tree: treeAfterReset(),
prev: pathWith(defaultLoc()),
tree: treeAfterReset,
expectErr: assert.NoError,
expectLive: false,
expectTombstone: true,
},
{
name: "missing ID",
prev: pathWith(loc),
tree: newFolderyMcFolderFace(nil, rootID),
prev: pathWith(defaultLoc()),
tree: newTree,
expectErr: assert.Error,
expectLive: false,
expectTombstone: false,
@@ -305,7 +313,7 @@
{
name: "missing prev",
id: id(folder),
tree: newFolderyMcFolderFace(nil, rootID),
tree: newTree,
expectErr: assert.Error,
expectLive: false,
expectTombstone: false,
@@ -313,8 +321,8 @@
{
name: "update live folder",
id: id(folder),
prev: pathWith(loc),
tree: treeWithFolders(),
prev: pathWith(defaultLoc()),
tree: treeWithFolders,
expectErr: assert.NoError,
expectLive: true,
expectTombstone: false,
@@ -322,8 +330,8 @@
{
name: "update tombstone",
id: id(folder),
prev: pathWith(loc),
tree: treeWithTombstone(),
prev: pathWith(defaultLoc()),
tree: treeWithTombstone,
expectErr: assert.NoError,
expectLive: false,
expectTombstone: true,
@@ -332,22 +340,23 @@
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
tree := test.tree(t)

err := test.tree.setPreviousPath(test.id, test.prev)
err := tree.setPreviousPath(test.id, test.prev)
test.expectErr(t, err, clues.ToCore(err))

if test.expectLive {
require.Contains(t, test.tree.folderIDToNode, test.id)
assert.Equal(t, test.prev, test.tree.folderIDToNode[test.id].prev)
require.Contains(t, tree.folderIDToNode, test.id)
assert.Equal(t, test.prev.String(), tree.folderIDToNode[test.id].prev.String())
} else {
require.NotContains(t, test.tree.folderIDToNode, test.id)
require.NotContains(t, tree.folderIDToNode, test.id)
}

if test.expectTombstone {
require.Contains(t, test.tree.tombstones, test.id)
assert.Equal(t, test.prev, test.tree.tombstones[test.id].prev)
require.Contains(t, tree.tombstones, test.id)
assert.Equal(t, test.prev, tree.tombstones[test.id].prev)
} else {
require.NotContains(t, test.tree.tombstones, test.id)
require.NotContains(t, tree.tombstones, test.id)
}
})
}
@@ -469,7 +478,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder_correctTree()
ctx, flush := tester.NewContext(t)
defer flush()

tree := treeWithRoot()
tree := treeWithRoot(t)

set := func(
parentID, fid, fname string,
@@ -555,7 +564,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder_correctTombst
ctx, flush := tester.NewContext(t)
defer flush()

tree := treeWithRoot()
tree := treeWithRoot(t)

set := func(
parentID, fid, fname string,
@@ -730,7 +739,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_SetFolder_correctTombst
func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
table := []struct {
tname string
tree *folderyMcFolderFace
tree func(t *testing.T) *folderyMcFolderFace
oldParentID string
parentID string
contentSize int64
@@ -739,7 +748,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_AddFile() {
}{
{
tname: "add file to root",
tree: treeWithRoot(),
tree: treeWithRoot,
oldParentID: "",
parentID: rootID,
contentSize: 42,
@@ -748,7 +757,7 @@
},
{
tname: "add file to folder",
tree: treeWithFolders(),
tree: treeWithFolders,
oldParentID: "",
parentID: id(folder),
contentSize: 24,
@@ -757,7 +766,7 @@
},
{
tname: "re-add file at the same location",
tree: treeWithFileAtRoot(),
tree: treeWithFileAtRoot,
oldParentID: rootID,
parentID: rootID,
contentSize: 84,
@@ -766,7 +775,7 @@
},
{
tname: "move file from folder to root",
tree: treeWithFileInFolder(),
tree: treeWithFileInFolder,
oldParentID: id(folder),
parentID: rootID,
contentSize: 48,
@@ -775,7 +784,7 @@
},
{
tname: "move file from tombstone to root",
tree: treeWithFileInTombstone(),
tree: treeWithFileInTombstone,
oldParentID: id(folder),
parentID: rootID,
contentSize: 2,
@@ -784,7 +793,7 @@
},
{
tname: "error adding file to tombstone",
tree: treeWithTombstone(),
tree: treeWithTombstone,
oldParentID: "",
parentID: id(folder),
contentSize: 4,
@@ -793,7 +802,7 @@
},
{
tname: "error adding file before parent",
tree: treeWithTombstone(),
tree: treeWithTombstone,
oldParentID: "",
parentID: idx(folder, 1),
contentSize: 8,
@@ -802,7 +811,7 @@
},
{
tname: "error adding file without parent id",
tree: treeWithTombstone(),
tree: treeWithTombstone,
oldParentID: "",
parentID: "",
contentSize: 16,
@@ -813,33 +822,33 @@
for _, test := range table {
suite.Run(test.tname, func() {
t := suite.T()
tree := test.tree(t)

err := test.tree.addFile(
df := driveFile(file, parentDir(), test.parentID)
df.SetSize(ptr.To(test.contentSize))

err := tree.addFile(
test.parentID,
id(file),
time.Now(),
test.contentSize)
custom.ToCustomDriveItem(df))
test.expectErr(t, err, clues.ToCore(err))
assert.Equal(t, test.expectFiles, test.tree.fileIDToParentID)
assert.Equal(t, test.expectFiles, tree.fileIDToParentID)

if err != nil {
return
}

parent := test.tree.getNode(test.parentID)
parent := tree.getNode(test.parentID)

require.NotNil(t, parent)
assert.Contains(t, parent.files, id(file))

countSize := test.tree.countLiveFilesAndSizes()
countSize := tree.countLiveFilesAndSizes()
assert.Equal(t, 1, countSize.numFiles, "should have one file in the tree")
assert.Equal(t, test.contentSize, countSize.totalBytes, "tree should be sized to test file contents")

if len(test.oldParentID) > 0 && test.oldParentID != test.parentID {
old, ok := test.tree.folderIDToNode[test.oldParentID]
if !ok {
old = test.tree.tombstones[test.oldParentID]
}
old := tree.getNode(test.oldParentID)

require.NotNil(t, old)
assert.NotContains(t, old.files, id(file))
@@ -851,49 +860,50 @@
func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_DeleteFile() {
table := []struct {
tname string
tree *folderyMcFolderFace
tree func(t *testing.T) *folderyMcFolderFace
parentID string
}{
{
tname: "delete unseen file",
tree: treeWithRoot(),
tree: treeWithRoot,
parentID: rootID,
},
{
tname: "delete file from root",
tree: treeWithFolders(),
tree: treeWithFolders,
parentID: rootID,
},
{
tname: "delete file from folder",
tree: treeWithFileInFolder(),
tree: treeWithFileInFolder,
parentID: id(folder),
},
{
tname: "delete file from tombstone",
tree: treeWithFileInTombstone(),
tree: treeWithFileInTombstone,
parentID: id(folder),
},
}
for _, test := range table {
suite.Run(test.tname, func() {
t := suite.T()
tree := test.tree(t)

test.tree.deleteFile(id(file))
tree.deleteFile(id(file))

parent := test.tree.getNode(test.parentID)
parent := tree.getNode(test.parentID)

require.NotNil(t, parent)
assert.NotContains(t, parent.files, id(file))
assert.NotContains(t, test.tree.fileIDToParentID, id(file))
assert.Contains(t, test.tree.deletedFileIDs, id(file))
assert.NotContains(t, tree.fileIDToParentID, id(file))
assert.Contains(t, tree.deletedFileIDs, id(file))
})
}
}

func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
t := suite.T()
tree := treeWithRoot()
tree := treeWithRoot(t)
fID := id(file)

require.Len(t, tree.fileIDToParentID, 0)
@@ -906,7 +916,7 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
assert.Len(t, tree.deletedFileIDs, 1)
assert.Contains(t, tree.deletedFileIDs, fID)

err := tree.addFile(rootID, fID, time.Now(), defaultItemSize)
err := tree.addFile(rootID, fID, custom.ToCustomDriveItem(fileAtRoot()))
require.NoError(t, err, clues.ToCore(err))

assert.Len(t, tree.fileIDToParentID, 1)
@@ -921,3 +931,283 @@ func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_addAndDeleteFile() {
assert.Len(t, tree.deletedFileIDs, 1)
assert.Contains(t, tree.deletedFileIDs, fID)
}

func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_GenerateExcludeItemIDs() {
table := []struct {
name string
tree func(t *testing.T) *folderyMcFolderFace
expect map[string]struct{}
}{
{
name: "no files",
tree: treeWithRoot,
expect: map[string]struct{}{},
},
{
name: "one file in a folder",
tree: treeWithFileInFolder,
expect: makeExcludeMap(id(file)),
},
{
name: "one file in a tombstone",
tree: treeWithFileInTombstone,
expect: map[string]struct{}{},
},
{
name: "one deleted file",
tree: treeWithDeletedFile,
expect: makeExcludeMap(idx(file, "d")),
},
{
name: "files in folders and tombstones",
tree: fullTree,
expect: makeExcludeMap(
id(file),
idx(file, "r"),
idx(file, "p"),
idx(file, "d")),
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
tree := test.tree(t)

result := tree.generateExcludeItemIDs()
assert.Equal(t, test.expect, result)
})
}
}

// ---------------------------------------------------------------------------
// post-processing tests
// ---------------------------------------------------------------------------

func (suite *DeltaTreeUnitSuite) TestFolderyMcFolderFace_GenerateCollectables() {
t := suite.T()

table := []struct {
name string
tree func(t *testing.T) *folderyMcFolderFace
prevPaths map[string]string
expectErr require.ErrorAssertionFunc
expect map[string]collectable
}{
{
name: "empty tree",
tree: newTree,
expectErr: require.NoError,
expect: map[string]collectable{},
},
{
name: "root only",
tree: treeWithRoot,
expectErr: require.NoError,
expect: map[string]collectable{
rootID: {
currPath: fullPathPath(t),
files: map[string]*custom.DriveItem{},
folderID: rootID,
isPackageOrChildOfPackage: false,
loc: path.Elements{},
},
},
},
{
name: "root with files",
tree: treeWithFileAtRoot,
expectErr: require.NoError,
expect: map[string]collectable{
rootID: {
currPath: fullPathPath(t),
files: map[string]*custom.DriveItem{
id(file): custom.ToCustomDriveItem(fileAtRoot()),
},
folderID: rootID,
isPackageOrChildOfPackage: false,
loc: path.Elements{},
},
},
},
{
name: "folder hierarchy, no previous",
tree: treeWithFileInFolder,
expectErr: require.NoError,
expect: map[string]collectable{
rootID: {
currPath: fullPathPath(t),
files: map[string]*custom.DriveItem{},
folderID: rootID,
isPackageOrChildOfPackage: false,
loc: path.Elements{},
},
idx(folder, "parent"): {
currPath: fullPathPath(t, namex(folder, "parent")),
files: map[string]*custom.DriveItem{},
folderID: idx(folder, "parent"),
isPackageOrChildOfPackage: false,
loc: path.Elements{rootName},
},
id(folder): {
currPath: fullPathPath(t, namex(folder, "parent"), name(folder)),
files: map[string]*custom.DriveItem{
id(file): custom.ToCustomDriveItem(fileAt("parent")),
},
folderID: id(folder),
isPackageOrChildOfPackage: false,
loc: path.Elements{rootName, namex(folder, "parent")},
},
},
},
{
name: "package in hierarchy",
tree: func(t *testing.T) *folderyMcFolderFace {
ctx, flush := tester.NewContext(t)
defer flush()

tree := treeWithRoot(t)
err := tree.setFolder(ctx, rootID, id(pkg), name(pkg), true)
require.NoError(t, err, clues.ToCore(err))

err = tree.setFolder(ctx, id(pkg), id(folder), name(folder), false)
require.NoError(t, err, clues.ToCore(err))

return tree
},
expectErr: require.NoError,
expect: map[string]collectable{
rootID: {
currPath: fullPathPath(t),
files: map[string]*custom.DriveItem{},
folderID: rootID,
isPackageOrChildOfPackage: false,
loc: path.Elements{},
},
id(pkg): {
currPath: fullPathPath(t, name(pkg)),
files: map[string]*custom.DriveItem{},
folderID: id(pkg),
isPackageOrChildOfPackage: true,
loc: path.Elements{rootName},
},
id(folder): {
currPath: fullPathPath(t, name(pkg), name(folder)),
files: map[string]*custom.DriveItem{},
folderID: id(folder),
isPackageOrChildOfPackage: true,
loc: path.Elements{rootName, name(pkg)},
},
},
},
{
name: "folder hierarchy with previous paths",
tree: treeWithFileInFolder,
expectErr: require.NoError,
prevPaths: map[string]string{
rootID: fullPath(),
idx(folder, "parent"): fullPath(namex(folder, "parent-prev")),
id(folder): fullPath(namex(folder, "parent-prev"), name(folder)),
},
expect: map[string]collectable{
rootID: {
currPath: fullPathPath(t),
files: map[string]*custom.DriveItem{},
folderID: rootID,
isPackageOrChildOfPackage: false,
loc: path.Elements{},
prevPath: fullPathPath(t),
},
idx(folder, "parent"): {
currPath: fullPathPath(t, namex(folder, "parent")),
files: map[string]*custom.DriveItem{},
folderID: idx(folder, "parent"),
isPackageOrChildOfPackage: false,
loc: path.Elements{rootName},
prevPath: fullPathPath(t, namex(folder, "parent-prev")),
},
id(folder): {
currPath: fullPathPath(t, namex(folder, "parent"), name(folder)),
folderID: id(folder),
isPackageOrChildOfPackage: false,
files: map[string]*custom.DriveItem{
id(file): custom.ToCustomDriveItem(fileAt("parent")),
},
loc: path.Elements{rootName, namex(folder, "parent")},
prevPath: fullPathPath(t, namex(folder, "parent-prev"), name(folder)),
},
},
},
{
name: "root and tombstones",
tree: treeWithFileInTombstone,
prevPaths: map[string]string{
rootID: fullPath(),
id(folder): fullPath(name(folder)),
},
expectErr: require.NoError,
expect: map[string]collectable{
rootID: {
currPath: fullPathPath(t),
files: map[string]*custom.DriveItem{},
folderID: rootID,
isPackageOrChildOfPackage: false,
loc: path.Elements{},
prevPath: fullPathPath(t),
},
id(folder): {
files: map[string]*custom.DriveItem{},
folderID: id(folder),
isPackageOrChildOfPackage: false,
prevPath: fullPathPath(t, name(folder)),
},
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
tree := test.tree(t)

if len(test.prevPaths) > 0 {
for id, ps := range test.prevPaths {
pp, err := path.FromDataLayerPath(ps, false)
require.NoError(t, err, clues.ToCore(err))

err = tree.setPreviousPath(id, pp)
require.NoError(t, err, clues.ToCore(err))
}
}

results, err := tree.generateCollectables()
test.expectErr(t, err, clues.ToCore(err))
assert.Len(t, results, len(test.expect))

for id, expect := range test.expect {
require.Contains(t, results, id)

result := results[id]
assert.Equal(t, id, result.folderID)

if expect.currPath == nil {
assert.Nil(t, result.currPath)
} else {
assert.Equal(t, expect.currPath.String(), result.currPath.String())
}

if expect.prevPath == nil {
assert.Nil(t, result.prevPath)
} else {
assert.Equal(t, expect.prevPath.String(), result.prevPath.String())
}

if expect.loc == nil {
assert.Nil(t, result.loc)
} else {
assert.Equal(t, expect.loc.PlainString(), result.loc.PlainString())
}

assert.ElementsMatch(t, maps.Keys(expect.files), maps.Keys(result.files))
}
})
}
}

@@ -19,6 +19,7 @@ import (
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
"github.com/alcionai/corso/src/internal/m365/support"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/account"
bupMD "github.com/alcionai/corso/src/pkg/backup/metadata"
@@ -30,6 +31,7 @@ import (
"github.com/alcionai/corso/src/pkg/services/m365/api"
"github.com/alcionai/corso/src/pkg/services/m365/api/graph"
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
"github.com/alcionai/corso/src/pkg/services/m365/custom"
)

const defaultItemSize int64 = 42
@@ -152,6 +154,7 @@ func coreItem(
item := models.NewDriveItem()
item.SetName(&name)
item.SetId(&id)
item.SetLastModifiedDateTime(ptr.To(time.Now()))

parentReference := models.NewItemReference()
parentReference.SetPath(&parentPath)
@@ -178,6 +181,21 @@ func driveItem(
return coreItem(id, name, parentPath, parentID, it)
}

func driveFile(
idX any,
parentPath, parentID string,
) models.DriveItemable {
i := id(file)
n := name(file)

if idX != file {
i = idx(file, idX)
n = namex(file, idX)
}

return driveItem(i, n, parentPath, parentID, isFile)
}

func fileAtRoot() models.DriveItemable {
return driveItem(id(file), name(file), parentDir(), rootID, isFile)
}
@@ -444,6 +462,13 @@ func fullPath(elems ...string) string {
elems...)...)
}

func fullPathPath(t *testing.T, elems ...string) path.Path {
p, err := path.FromDataLayerPath(fullPath(elems...), false)
require.NoError(t, err, clues.ToCore(err))

return p
}

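fullPathPath pairs with the existing string-based fullPath: same elements, but parsed into a path.Path and validated via require. A usage sketch for building test expectations:

```go
// expectation building in tests; fails fast on a malformed path.
expected := fullPathPath(t, namex(folder, "parent"), name(folder))
assert.Equal(t, expected.String(), result.currPath.String())
```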
func driveFullPath(driveID any, elems ...string) string {
|
||||
return toPath(append(
|
||||
[]string{
|
||||
@ -468,12 +493,6 @@ func driveParentDir(driveID any, elems ...string) string {
|
||||
elems...)...)
|
||||
}
|
||||
|
||||
// just for readability
|
||||
const (
|
||||
doMergeItems = true
|
||||
doNotMergeItems = false
|
||||
)
|
||||
|
||||
// common item names
|
||||
const (
|
||||
bar = "bar"
|
||||
@ -564,26 +583,6 @@ func collWithMBHAndOpts(
|
||||
count.New())
|
||||
}
|
||||
|
||||
// func fullOrPrevPath(
|
||||
// t *testing.T,
|
||||
// coll data.BackupCollection,
|
||||
// ) path.Path {
|
||||
// var collPath path.Path
|
||||
|
||||
// if coll.State() != data.DeletedState {
|
||||
// collPath = coll.FullPath()
|
||||
// } else {
|
||||
// collPath = coll.PreviousPath()
|
||||
// }
|
||||
|
||||
// require.False(
|
||||
// t,
|
||||
// len(collPath.Elements()) < 4,
|
||||
// "malformed or missing collection path")
|
||||
|
||||
// return collPath
|
||||
// }
|
||||
|
||||
func pagerForDrives(drives ...models.Driveable) *apiMock.Pager[models.Driveable] {
|
||||
return &apiMock.Pager[models.Driveable]{
|
||||
ToReturn: []apiMock.PagerResult[models.Driveable]{
|
||||
@ -592,6 +591,30 @@ func pagerForDrives(drives ...models.Driveable) *apiMock.Pager[models.Driveable]
|
||||
}
|
||||
}
|
||||
|
||||
func aPage(items ...models.DriveItemable) mock.NextPage {
|
||||
return mock.NextPage{
|
||||
Items: append([]models.DriveItemable{driveRootItem()}, items...),
|
||||
}
|
||||
}
|
||||
|
||||
func aPageWReset(items ...models.DriveItemable) mock.NextPage {
|
||||
return mock.NextPage{
|
||||
Items: append([]models.DriveItemable{driveRootItem()}, items...),
|
||||
Reset: true,
|
||||
}
|
||||
}
|
||||
|
||||
func aReset(items ...models.DriveItemable) mock.NextPage {
|
||||
return mock.NextPage{
|
||||
Items: []models.DriveItemable{},
|
||||
Reset: true,
|
||||
}
|
||||
}
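
Each page builder prepends driveRootItem(), mirroring how delta enumeration re-delivers the root on every page. A sketch of composing a mock enumeration with a mid-stream reset, using only the constructors above (the item choice is illustrative):

pages := []mock.NextPage{
	aPage(fileAtRoot()), // ordinary delta page
	aReset(),            // delta token invalidated; empty page flags the reset
	aPage(fileAtRoot()), // re-enumeration after the reset
}
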

// ---------------------------------------------------------------------------
// metadata
// ---------------------------------------------------------------------------

func makePrevMetadataColls(
t *testing.T,
mbh BackupHandler,
@ -644,133 +667,150 @@ func makePrevMetadataColls(
// assert.Equal(t, expectPrevPaths, prevs, "previous paths")
// }

// for comparisons done by collection state
type stateAssertion struct {
itemIDs []string
// should never get set by the user.
// this flag gets flipped when calling assertions.compare.
// any unseen collection will error on requireNoUnseenCollections
// sawCollection bool
}
// ---------------------------------------------------------------------------
// collections
// ---------------------------------------------------------------------------

// for comparisons done by a given collection path
type collectionAssertion struct {
doNotMerge assert.BoolAssertionFunc
states map[data.CollectionState]*stateAssertion
excludedItems map[string]struct{}
curr path.Path
prev path.Path
state data.CollectionState
fileIDs []string
// should never get set by the user.
// this flag gets flipped when calling assertions.compare.
// any unseen collection will error on requireNoUnseenCollections
sawCollection bool
}

type statesToItemIDs map[data.CollectionState][]string
func aColl(
curr, prev path.Path,
fileIDs ...string,
) *collectionAssertion {
ids := make([]string, 0, 2*len(fileIDs))

// TODO(keepers): move excludeItems to a more global position.
func newCollAssertion(
doNotMerge bool,
itemsByState statesToItemIDs,
excludeItems ...string,
) collectionAssertion {
states := map[data.CollectionState]*stateAssertion{}

for state, itemIDs := range itemsByState {
states[state] = &stateAssertion{
itemIDs: itemIDs,
}
for _, fUD := range fileIDs {
ids = append(ids, fUD+metadata.DataFileSuffix)
ids = append(ids, fUD+metadata.MetaFileSuffix)
}

dnm := assert.False
if doNotMerge {
dnm = assert.True
}

return collectionAssertion{
doNotMerge: dnm,
states: states,
excludedItems: makeExcludeMap(excludeItems...),
return &collectionAssertion{
curr: curr,
prev: prev,
state: data.StateOf(prev, curr, count.New()),
fileIDs: ids,
}
}

// to aggregate all collection-related expectations in the backup
// map collection path -> collection state -> assertion
type collectionAssertions map[string]collectionAssertion
type expectedCollections struct {
assertions map[string]*collectionAssertion
doNotMerge assert.BoolAssertionFunc
hasURLCache assert.ValueAssertionFunc
}

// ensure the provided collection matches expectations as set by the test.
// func (cas collectionAssertions) compare(
// t *testing.T,
// coll data.BackupCollection,
// excludes *prefixmatcher.StringSetMatchBuilder,
// ) {
// ctx, flush := tester.NewContext(t)
// defer flush()
func expectCollections(
doNotMerge bool,
hasURLCache bool,
colls ...*collectionAssertion,
) expectedCollections {
as := map[string]*collectionAssertion{}

// var (
// itemCh = coll.Items(ctx, fault.New(true))
// itemIDs = []string{}
// )
for _, coll := range colls {
as[expectFullOrPrev(coll).String()] = coll
}

// p := fullOrPrevPath(t, coll)
dontMerge := assert.False
if doNotMerge {
dontMerge = assert.True
}

// for itm := range itemCh {
// itemIDs = append(itemIDs, itm.ID())
// }
hasCache := assert.Nil
if hasURLCache {
hasCache = assert.NotNil
}

// expect := cas[p.String()]
// expectState := expect.states[coll.State()]
// expectState.sawCollection = true
return expectedCollections{
assertions: as,
doNotMerge: dontMerge,
hasURLCache: hasCache,
}
}
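
Together, aColl and expectCollections let a test declare its complete post-backup expectations up front, keyed by each collection's full-or-previous path. A hypothetical declaration (paths and ids are placeholders):

expect := expectCollections(
	false, // collections may merge with prior state
	true,  // collections should carry a url cache
	aColl(fullPathPath(t), nil, id(file)),     // new collection at the drive root
	aColl(nil, fullPathPath(t, name(folder)))) // deleted collection
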

// assert.ElementsMatchf(
// t,
// expectState.itemIDs,
// itemIDs,
// "expected all items to match in collection with:\nstate %q\npath %q",
// coll.State(),
// p)
func (ecs expectedCollections) compare(
t *testing.T,
colls []data.BackupCollection,
) {
for _, coll := range colls {
ecs.compareColl(t, coll)
}
}

// expect.doNotMerge(
// t,
// coll.DoNotMergeItems(),
// "expected collection to have the appropriate doNotMerge flag")
func (ecs expectedCollections) compareColl(t *testing.T, coll data.BackupCollection) {
ctx, flush := tester.NewContext(t)
defer flush()

// if result, ok := excludes.Get(p.String()); ok {
// assert.Equal(
// t,
// expect.excludedItems,
// result,
// "excluded items")
// }
// }
var (
itemIDs = []string{}
p = fullOrPrevPath(t, coll)
)

if coll.State() != data.DeletedState {
for itm := range coll.Items(ctx, fault.New(true)) {
itemIDs = append(itemIDs, itm.ID())
}
}

expect := ecs.assertions[p.String()]
require.NotNil(
t,
expect,
"test should have an expected entry for collection with:\n\tstate %q\n\tpath %q",
coll.State(),
p)

expect.sawCollection = true

assert.ElementsMatchf(
t,
expect.fileIDs,
itemIDs,
"expected all items to match in collection with:\n\tstate %q\n\tpath %q",
coll.State(),
p)

if expect.prev == nil {
assert.Nil(t, coll.PreviousPath(), "previous path")
} else {
assert.Equal(t, expect.prev, coll.PreviousPath())
}

if expect.curr == nil {
assert.Nil(t, coll.FullPath(), "collection path")
} else {
assert.Equal(t, expect.curr, coll.FullPath())
}

ecs.doNotMerge(
t,
coll.DoNotMergeItems(),
"expected collection to have the appropriate doNotMerge flag")

driveColl := coll.(*Collection)

ecs.hasURLCache(t, driveColl.urlCache, "has a populated url cache handler")
}

// ensure that no collections in the expected set are still flagged
// as sawCollection == false.
// func (cas collectionAssertions) requireNoUnseenCollections(
// t *testing.T,
// ) {
// for p, withPath := range cas {
// for _, state := range withPath.states {
// require.True(
// t,
// state.sawCollection,
// "results should have contained collection:\n\t%q\t\n%q",
// state, p)
// }
// }
// }

func aPage(items ...models.DriveItemable) mock.NextPage {
return mock.NextPage{
Items: append([]models.DriveItemable{driveRootItem()}, items...),
}
}

func aPageWReset(items ...models.DriveItemable) mock.NextPage {
return mock.NextPage{
Items: append([]models.DriveItemable{driveRootItem()}, items...),
Reset: true,
}
}

func aReset(items ...models.DriveItemable) mock.NextPage {
return mock.NextPage{
Items: []models.DriveItemable{},
Reset: true,
func (ecs expectedCollections) requireNoUnseenCollections(t *testing.T) {
for _, ca := range ecs.assertions {
require.True(
t,
ca.sawCollection,
"results did not include collection at:\n\tstate %q\n\tpath %q",
ca.state, expectFullOrPrev(ca))
}
}
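
The intended flow pairs the two checks so that both unexpected and missing collections fail the test. A sketch, assuming colls came back from the code under test:

expect.compare(t, colls)             // every produced collection matches an assertion
expect.requireNoUnseenCollections(t) // every assertion matched a produced collection
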

@ -778,10 +818,33 @@ func aReset(items ...models.DriveItemable) mock.NextPage {
// delta trees
// ---------------------------------------------------------------------------

var loc = path.NewElements("root:/foo/bar/baz/qux/fnords/smarf/voi/zumba/bangles/howdyhowdyhowdy")
func defaultTreePfx(t *testing.T) path.Path {
fpb := fullPathPath(t).ToBuilder()
fpe := fpb.Elements()
fpe = fpe[:len(fpe)-1]
fpb = path.Builder{}.Append(fpe...)

func treeWithRoot() *folderyMcFolderFace {
tree := newFolderyMcFolderFace(nil, rootID)
p, err := path.FromDataLayerPath(fpb.String(), false)
require.NoErrorf(
t,
err,
"err processing path:\n\terr %+v\n\tpath %q",
clues.ToCore(err),
fpb)

return p
}

func defaultLoc() path.Elements {
return path.NewElements("root:/foo/bar/baz/qux/fnords/smarf/voi/zumba/bangles/howdyhowdyhowdy")
}

func newTree(t *testing.T) *folderyMcFolderFace {
return newFolderyMcFolderFace(defaultTreePfx(t), rootID)
}
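
Note the distinction these constructors set up: newTree carries only the path prefix, while treeWithRoot (below) also registers the root node. A sketch of the difference:

tree := newTree(t)     // prefix set, but no root node registered yet
tree = treeWithRoot(t) // root present in tree.folderIDToNode[rootID]
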

func treeWithRoot(t *testing.T) *folderyMcFolderFace {
tree := newFolderyMcFolderFace(defaultTreePfx(t), rootID)
rootey := newNodeyMcNodeFace(nil, rootID, rootName, false)
tree.root = rootey
tree.folderIDToNode[rootID] = rootey
@ -789,29 +852,29 @@ func treeWithRoot() *folderyMcFolderFace {
return tree
}

func treeAfterReset() *folderyMcFolderFace {
tree := newFolderyMcFolderFace(nil, rootID)
func treeAfterReset(t *testing.T) *folderyMcFolderFace {
tree := newFolderyMcFolderFace(defaultTreePfx(t), rootID)
tree.reset()

return tree
}

func treeWithFoldersAfterReset() *folderyMcFolderFace {
tree := treeWithFolders()
func treeWithFoldersAfterReset(t *testing.T) *folderyMcFolderFace {
tree := treeWithFolders(t)
tree.hadReset = true

return tree
}

func treeWithTombstone() *folderyMcFolderFace {
tree := treeWithRoot()
func treeWithTombstone(t *testing.T) *folderyMcFolderFace {
tree := treeWithRoot(t)
tree.tombstones[id(folder)] = newNodeyMcNodeFace(nil, id(folder), "", false)

return tree
}

func treeWithFolders() *folderyMcFolderFace {
tree := treeWithRoot()
func treeWithFolders(t *testing.T) *folderyMcFolderFace {
tree := treeWithRoot(t)

parent := newNodeyMcNodeFace(tree.root, idx(folder, "parent"), namex(folder, "parent"), true)
tree.folderIDToNode[parent.id] = parent
@ -824,35 +887,146 @@ func treeWithFolders() *folderyMcFolderFace {
return tree
}

func treeWithFileAtRoot() *folderyMcFolderFace {
tree := treeWithRoot()
tree.root.files[id(file)] = fileyMcFileFace{
lastModified: time.Now(),
contentSize: 42,
}
func treeWithFileAtRoot(t *testing.T) *folderyMcFolderFace {
tree := treeWithRoot(t)
tree.root.files[id(file)] = custom.ToCustomDriveItem(fileAtRoot())
tree.fileIDToParentID[id(file)] = rootID

return tree
}

func treeWithFileInFolder() *folderyMcFolderFace {
tree := treeWithFolders()
tree.folderIDToNode[id(folder)].files[id(file)] = fileyMcFileFace{
lastModified: time.Now(),
contentSize: 42,
func treeWithDeletedFile(t *testing.T) *folderyMcFolderFace {
tree := treeWithRoot(t)
tree.deleteFile(idx(file, "d"))

return tree
}

func treeWithFileInFolder(t *testing.T) *folderyMcFolderFace {
tree := treeWithFolders(t)
tree.folderIDToNode[id(folder)].files[id(file)] = custom.ToCustomDriveItem(fileAt(folder))
tree.fileIDToParentID[id(file)] = id(folder)

return tree
}

func treeWithFileInTombstone() *folderyMcFolderFace {
tree := treeWithTombstone()
tree.tombstones[id(folder)].files[id(file)] = fileyMcFileFace{
lastModified: time.Now(),
contentSize: 42,
}
func treeWithFileInTombstone(t *testing.T) *folderyMcFolderFace {
tree := treeWithTombstone(t)
tree.tombstones[id(folder)].files[id(file)] = custom.ToCustomDriveItem(fileAt("tombstone"))
tree.fileIDToParentID[id(file)] = id(folder)

return tree
}

// root -> idx(folder, parent) -> id(folder)
// one item at each dir
// one tombstone: idx(folder, tombstone)
// one item in the tombstone
// one deleted item
func fullTree(t *testing.T) *folderyMcFolderFace {
return fullTreeWithNames("parent", "tombstone")(t)
}

func fullTreeWithNames(
parentFolderX, tombstoneX any,
) func(t *testing.T) *folderyMcFolderFace {
return func(t *testing.T) *folderyMcFolderFace {
ctx, flush := tester.NewContext(t)
defer flush()

tree := treeWithRoot(t)

// file in root
df := driveFile("r", parentDir(), rootID)
err := tree.addFile(
rootID,
idx(file, "r"),
custom.ToCustomDriveItem(df))
require.NoError(t, err, clues.ToCore(err))

// root -> idx(folder, parent)
err = tree.setFolder(ctx, rootID, idx(folder, parentFolderX), namex(folder, parentFolderX), false)
require.NoError(t, err, clues.ToCore(err))

// file in idx(folder, parent)
df = driveFile("p", parentDir(namex(folder, parentFolderX)), idx(folder, parentFolderX))
err = tree.addFile(
idx(folder, parentFolderX),
idx(file, "p"),
custom.ToCustomDriveItem(df))
require.NoError(t, err, clues.ToCore(err))

// idx(folder, parent) -> id(folder)
err = tree.setFolder(ctx, idx(folder, parentFolderX), id(folder), name(folder), false)
require.NoError(t, err, clues.ToCore(err))

// file in id(folder)
df = driveFile(file, parentDir(name(folder)), id(folder))
err = tree.addFile(
id(folder),
id(file),
custom.ToCustomDriveItem(df))
require.NoError(t, err, clues.ToCore(err))

// tombstone - have to set a non-tombstone folder first, then add the item, then tombstone the folder
err = tree.setFolder(ctx, rootID, idx(folder, tombstoneX), namex(folder, tombstoneX), false)
require.NoError(t, err, clues.ToCore(err))

// file in tombstone
df = driveFile("t", parentDir(namex(folder, tombstoneX)), idx(folder, tombstoneX))
err = tree.addFile(
idx(folder, tombstoneX),
idx(file, "t"),
custom.ToCustomDriveItem(df))
require.NoError(t, err, clues.ToCore(err))

err = tree.setTombstone(ctx, idx(folder, tombstoneX))
require.NoError(t, err, clues.ToCore(err))

// deleted file
tree.deleteFile(idx(file, "d"))

return tree
}
}
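
Because fullTreeWithNames is parameterized over folder naming, structurally identical trees with different names fit naturally into table-driven tests. A hypothetical case list:

table := []struct {
	name     string
	treeFunc func(t *testing.T) *folderyMcFolderFace
}{
	{name: "default names", treeFunc: fullTree},
	{name: "renamed parent", treeFunc: fullTreeWithNames("parent2", "tombstone")},
}
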

// ---------------------------------------------------------------------------
// misc
// ---------------------------------------------------------------------------
func expectFullOrPrev(ca *collectionAssertion) path.Path {
var p path.Path

if ca.state != data.DeletedState {
p = ca.curr
} else {
p = ca.prev
}

return p
}

func fullOrPrevPath(
t *testing.T,
coll data.BackupCollection,
) path.Path {
var collPath path.Path

if coll.State() == data.DeletedState {
collPath = coll.PreviousPath()
} else {
collPath = coll.FullPath()
}

require.NotNil(
t,
collPath,
"full or prev path is nil for collection with state:\n\t%s",
coll.State())

require.False(
t,
len(collPath.Elements()) < 4,
"malformed or missing collection path")

return collPath
}

@ -19,6 +19,9 @@ import (

const (
urlCacheDriveItemThreshold = 300 * 1000
// 600 pages = 300k items, since delta enumeration produces 500 items per page
// TODO: export standard page size and swap to 300k/defaultDeltaPageSize
urlCacheDrivePagesThreshold = 600
urlCacheRefreshInterval = 1 * time.Hour
)
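
If the page size ever gets exported as the TODO suggests, the pages threshold could be derived instead of hard-coded; a hypothetical form (defaultDeltaPageSize = 500 is an assumed export, not an existing one):

// urlCacheDrivePagesThreshold = urlCacheDriveItemThreshold / defaultDeltaPageSize // 300000 / 500 = 600
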

@ -92,6 +92,7 @@ const (
TotalDeltasProcessed Key = "total-deltas-processed"
TotalFilesProcessed Key = "total-files-processed"
TotalFoldersProcessed Key = "total-folders-processed"
TotalItemsProcessed Key = "total-items-processed"
TotalMalwareProcessed Key = "total-malware-processed"
TotalPackagesProcessed Key = "total-packages-processed"
TotalPagesEnumerated Key = "total-pages-enumerated"